diff --git a/clarity-types/src/tests/types/signatures.rs b/clarity-types/src/tests/types/signatures.rs
index 97768778357..dd0e18ca9b2 100644
--- a/clarity-types/src/tests/types/signatures.rs
+++ b/clarity-types/src/tests/types/signatures.rs
@@ -15,13 +15,343 @@ use std::collections::HashSet;
 use crate::errors::CheckErrorKind;
+use crate::representations::CONTRACT_MAX_NAME_LENGTH;
 use crate::types::TypeSignature::{BoolType, IntType, ListUnionType, UIntType};
 use crate::types::signatures::{CallableSubtype, TypeSignature};
 use crate::types::{
-    MAX_VALUE_SIZE, QualifiedContractIdentifier, SequenceSubtype, TraitIdentifier,
-    TupleTypeSignature,
+    BufferLength, MAX_TO_ASCII_BUFFER_LEN, MAX_TO_ASCII_RESULT_LEN, MAX_TYPE_DEPTH,
+    MAX_UTF8_VALUE_SIZE, MAX_VALUE_SIZE, QualifiedContractIdentifier, SequenceSubtype,
+    StringSubtype, StringUTF8Length, TraitIdentifier, TupleTypeSignature, WRAPPER_VALUE_SIZE,
 };
 
+#[test]
+fn test_core_constants() {
+    assert_eq!(1_048_576, MAX_VALUE_SIZE);
+    assert_eq!(262_144, MAX_UTF8_VALUE_SIZE);
+    assert_eq!(1_048_571, MAX_TO_ASCII_RESULT_LEN);
+    assert_eq!(524_284, MAX_TO_ASCII_BUFFER_LEN);
+    assert_eq!(32, MAX_TYPE_DEPTH);
+    assert_eq!(1, WRAPPER_VALUE_SIZE);
+}
+
+#[test]
+fn test_buffer_length_try_from_u32_trait() {
+    let buffer = BufferLength::try_from(0_u32).unwrap();
+    assert_eq!(0, buffer.get_value());
+
+    let buffer = BufferLength::try_from(MAX_VALUE_SIZE).unwrap();
+    assert_eq!(MAX_VALUE_SIZE, buffer.get_value());
+
+    let err = BufferLength::try_from(MAX_VALUE_SIZE + 1).unwrap_err();
+    assert_eq!(CheckErrorKind::ValueTooLarge, err);
+}
+
+#[test]
+fn test_buffer_length_try_from_usize_trait() {
+    let buffer = BufferLength::try_from(0_usize).unwrap();
+    assert_eq!(0, buffer.get_value());
+
+    let buffer = BufferLength::try_from(MAX_VALUE_SIZE as usize).unwrap();
+    assert_eq!(MAX_VALUE_SIZE, buffer.get_value());
+
+    let err = BufferLength::try_from(MAX_VALUE_SIZE as usize + 1).unwrap_err();
+    assert_eq!(CheckErrorKind::ValueTooLarge, err);
+}
+
+#[test]
+fn test_buffer_length_try_from_i128_trait() {
+    let buffer = BufferLength::try_from(0_i128).unwrap();
+    assert_eq!(0, buffer.get_value());
+
+    let buffer = BufferLength::try_from(MAX_VALUE_SIZE as i128).unwrap();
+    assert_eq!(MAX_VALUE_SIZE, buffer.get_value());
+
+    let err = BufferLength::try_from(MAX_VALUE_SIZE as i128 + 1).unwrap_err();
+    assert_eq!(CheckErrorKind::ValueTooLarge, err);
+
+    let err = BufferLength::try_from(-1_i128).unwrap_err();
+    assert_eq!(CheckErrorKind::ValueOutOfBounds, err);
+}
+
+#[test]
+fn test_buffer_length_to_u32_using_from_trait() {
+    let buffer = BufferLength::new_unsafe(0);
+    assert_eq!(0, u32::from(&buffer));
+    assert_eq!(0, u32::from(buffer));
+}
+
+#[test]
+fn test_type_buffer_min_to_be_buffer_1() {
+    assert_eq!(TypeSignature::BUFFER_1, TypeSignature::BUFFER_MIN);
+}
+
+#[test]
+fn test_type_buffer_max() {
+    let expected = TypeSignature::SequenceType(SequenceSubtype::BufferType(
+        BufferLength::new_unsafe(MAX_VALUE_SIZE),
+    ));
+    let actual = TypeSignature::BUFFER_MAX;
+
+    assert_eq!(expected, actual);
+    assert_eq!(
+        1_048_580,
+        actual.size().unwrap(),
+        "size should be 1_048_580"
+    );
+    assert_eq!(5, actual.type_size().unwrap(), "type size should be 5");
+    assert_eq!(1, actual.depth(), "depth should be 1");
+}
+
+#[test]
+fn test_type_buffer_1() {
+    let expected =
+        TypeSignature::SequenceType(SequenceSubtype::BufferType(BufferLength::new_unsafe(1)));
+    let actual = TypeSignature::BUFFER_1;
+
+    assert_eq!(expected, actual);
+    assert_eq!(5, actual.size().unwrap(), "size should be 5");
+
assert_eq!(5, actual.type_size().unwrap(), "type size should be 5"); + assert_eq!(1, actual.depth(), "depth should be 1"); +} + +#[test] +fn test_type_buffer_20() { + let expected = + TypeSignature::SequenceType(SequenceSubtype::BufferType(BufferLength::new_unsafe(20))); + let actual = TypeSignature::BUFFER_20; + + assert_eq!(expected, actual); + assert_eq!(24, actual.size().unwrap(), "size should be 24"); + assert_eq!(5, actual.type_size().unwrap(), "type size should be 5"); + assert_eq!(1, actual.depth(), "depth should be 1"); +} + +#[test] +fn test_type_buffer_32() { + let expected = + TypeSignature::SequenceType(SequenceSubtype::BufferType(BufferLength::new_unsafe(32))); + let actual = TypeSignature::BUFFER_32; + + assert_eq!(expected, actual); + assert_eq!(36, actual.size().unwrap(), "size should be 36"); + assert_eq!(5, actual.type_size().unwrap(), "type size should be 5"); + assert_eq!(1, actual.depth(), "depth should be 1"); +} + +#[test] +fn test_type_buffer_33() { + let expected = + TypeSignature::SequenceType(SequenceSubtype::BufferType(BufferLength::new_unsafe(33))); + let actual = TypeSignature::BUFFER_33; + + assert_eq!(expected, actual); + assert_eq!(37, actual.size().unwrap(), "size should be 37"); + assert_eq!(5, actual.type_size().unwrap(), "type size should be 5"); + assert_eq!(1, actual.depth(), "depth should be 1"); +} + +#[test] +fn test_type_buffer_64() { + let expected = + TypeSignature::SequenceType(SequenceSubtype::BufferType(BufferLength::new_unsafe(64))); + let actual = TypeSignature::BUFFER_64; + + assert_eq!(expected, actual); + assert_eq!(68, actual.size().unwrap(), "size should be 68"); + assert_eq!(5, actual.type_size().unwrap(), "type size should be 5"); + assert_eq!(1, actual.depth(), "depth should be 1"); +} + +#[test] +fn test_type_buffer_65() { + let expected = + TypeSignature::SequenceType(SequenceSubtype::BufferType(BufferLength::new_unsafe(65))); + let actual = TypeSignature::BUFFER_65; + + assert_eq!(expected, actual); + assert_eq!(69, actual.size().unwrap(), "size should be 69"); + assert_eq!(5, actual.type_size().unwrap(), "type size should be 5"); + assert_eq!(1, actual.depth(), "depth should be 1"); +} + +#[test] +fn test_type_string_ascii_min() { + let expected = TypeSignature::SequenceType(SequenceSubtype::StringType(StringSubtype::ASCII( + BufferLength::new_unsafe(1), + ))); + let actual = TypeSignature::STRING_ASCII_MIN; + + assert_eq!(expected, actual); + assert_eq!(5, actual.size().unwrap(), "size should be 5"); + assert_eq!(5, actual.type_size().unwrap(), "type size should be 5"); + assert_eq!(1, actual.depth(), "depth should be 1"); +} + +#[test] +fn test_type_string_ascii_max() { + let expected = TypeSignature::SequenceType(SequenceSubtype::StringType(StringSubtype::ASCII( + BufferLength::new_unsafe(MAX_VALUE_SIZE), + ))); + let actual = TypeSignature::STRING_ASCII_MAX; + + assert_eq!(expected, actual); + assert_eq!( + 1_048_580, + actual.size().unwrap(), + "size should be 1_048_580" + ); + assert_eq!(5, actual.type_size().unwrap(), "type size should be 5"); + assert_eq!(1, actual.depth(), "depth should be 1"); +} + +#[test] +fn test_type_string_ascii_40() { + let expected = TypeSignature::SequenceType(SequenceSubtype::StringType(StringSubtype::ASCII( + BufferLength::new_unsafe(40), + ))); + let actual = TypeSignature::STRING_ASCII_40; + + assert_eq!(expected, actual); + assert_eq!(44, actual.size().unwrap(), "size should be 44"); + assert_eq!(5, actual.type_size().unwrap(), "type size should be 5"); + assert_eq!(1, actual.depth(), 
"depth should be 1"); +} + +#[test] +fn test_string_utf8_length_try_from_u32_trait() { + let string = StringUTF8Length::try_from(0_u32).unwrap(); + assert_eq!(0, string.get_value()); + + let string = StringUTF8Length::try_from(1_u32).unwrap(); + assert_eq!(1, string.get_value()); + + let string = StringUTF8Length::try_from(MAX_UTF8_VALUE_SIZE).unwrap(); + assert_eq!(MAX_UTF8_VALUE_SIZE, string.get_value()); + + let err = StringUTF8Length::try_from(MAX_UTF8_VALUE_SIZE + 1).unwrap_err(); + assert_eq!(CheckErrors::ValueTooLarge, err); +} + +#[test] +fn test_string_utf8_length_try_from_usize_trait() { + let string = StringUTF8Length::try_from(0_usize).unwrap(); + assert_eq!(0, string.get_value()); + + let string = StringUTF8Length::try_from(1_usize).unwrap(); + assert_eq!(1, string.get_value()); + + let string = StringUTF8Length::try_from(MAX_UTF8_VALUE_SIZE as usize).unwrap(); + assert_eq!(MAX_UTF8_VALUE_SIZE, string.get_value()); + + let err = StringUTF8Length::try_from(MAX_UTF8_VALUE_SIZE as usize + 1).unwrap_err(); + assert_eq!(CheckErrors::ValueTooLarge, err); +} + +#[test] +fn test_string_utf8_length_try_from_i128_trait() { + let string = StringUTF8Length::try_from(0_i128).unwrap(); + assert_eq!(0, string.get_value()); + + let string = StringUTF8Length::try_from(1_i128).unwrap(); + assert_eq!(1, string.get_value()); + + let string = StringUTF8Length::try_from(MAX_UTF8_VALUE_SIZE as i128).unwrap(); + assert_eq!(MAX_UTF8_VALUE_SIZE, string.get_value()); + + let err = StringUTF8Length::try_from(MAX_UTF8_VALUE_SIZE as i128 + 1).unwrap_err(); + assert_eq!(CheckErrors::ValueTooLarge, err); + + let err = StringUTF8Length::try_from(-1_i128).unwrap_err(); + assert_eq!(CheckErrors::ValueOutOfBounds, err); +} + +#[test] +fn test_type_string_utf8_min() { + let expected = TypeSignature::SequenceType(SequenceSubtype::StringType(StringSubtype::UTF8( + StringUTF8Length::new_unsafe(1), + ))); + let actual = TypeSignature::STRING_UTF8_MIN; + + assert_eq!(expected, actual); + assert_eq!(8, actual.size().unwrap(), "size should be 8"); + assert_eq!(5, actual.type_size().unwrap(), "type size should be 5"); + assert_eq!(1, actual.depth(), "depth should be 1"); +} + +#[test] +fn test_type_string_utf8_max() { + let expected = TypeSignature::SequenceType(SequenceSubtype::StringType(StringSubtype::UTF8( + StringUTF8Length::new_unsafe(MAX_UTF8_VALUE_SIZE), + ))); + let actual = TypeSignature::STRING_UTF8_MAX; + + assert_eq!(expected, actual); + assert_eq!(TypeSignature::STRING_UTF8_MAX, actual); + assert_eq!( + 1_048_580, + actual.size().unwrap(), + "size should be 1_048_580" + ); + assert_eq!(5, actual.type_size().unwrap(), "type size should be 5"); + assert_eq!(1, actual.depth(), "depth should be 1"); +} + +#[test] +fn test_type_string_utf8_40() { + let expected = TypeSignature::SequenceType(SequenceSubtype::StringType(StringSubtype::UTF8( + StringUTF8Length::new_unsafe(40), + ))); + let actual = TypeSignature::STRING_UTF8_40; + + assert_eq!(expected, actual); + assert_eq!(164, actual.size().unwrap(), "size should be 164"); + assert_eq!(5, actual.type_size().unwrap(), "type size should be 5"); + assert_eq!(1, actual.depth(), "depth should be 1"); +} + +#[test] +fn test_type_buffer_max_for_to_ascii_call() { + let expected = TypeSignature::SequenceType(SequenceSubtype::BufferType( + BufferLength::new_unsafe(MAX_TO_ASCII_BUFFER_LEN), + )); + let actual = TypeSignature::TO_ASCII_BUFFER_MAX; + + assert_eq!(expected, actual); + assert_eq!(524_288, actual.size().unwrap(), "size should be 524_288"); + assert_eq!(5, 
actual.type_size().unwrap(), "type size should be 5"); + assert_eq!(1, actual.depth(), "depth should be 1"); +} + +#[test] +fn test_type_string_max_ascii_for_to_ascii_call() { + let expected = TypeSignature::SequenceType(SequenceSubtype::StringType(StringSubtype::ASCII( + BufferLength::new_unsafe(MAX_TO_ASCII_RESULT_LEN), + ))); + let actual = TypeSignature::TO_ASCII_STRING_ASCII_MAX; + + assert_eq!(expected, actual); + assert_eq!( + 1_048_575, + actual.size().unwrap(), + "size should be 1_048_575" + ); + assert_eq!(5, actual.type_size().unwrap(), "type size should be 5"); + assert_eq!(1, actual.depth(), "depth should be 1"); +} + +#[test] +fn test_type_string_max_ascii_for_contract_name() { + let expected = TypeSignature::SequenceType(SequenceSubtype::StringType(StringSubtype::ASCII( + BufferLength::new_unsafe(CONTRACT_MAX_NAME_LENGTH as u32), + ))); + let actual = TypeSignature::CONTRACT_NAME_STRING_ASCII_MAX; + + assert_eq!(expected, actual); + assert_eq!(44, actual.size().unwrap(), "size should be 44"); + assert_eq!(5, actual.type_size().unwrap(), "type size should be 5"); + assert_eq!(1, actual.depth(), "depth should be 1"); +} + #[test] fn test_least_supertype() { let callables = [ @@ -68,8 +398,8 @@ fn test_least_supertype() { TypeSignature::BoolType, ), ( - (TypeSignature::NoType, TypeSignature::min_buffer().unwrap()), - TypeSignature::min_buffer().unwrap(), + (TypeSignature::NoType, TypeSignature::BUFFER_MIN), + TypeSignature::BUFFER_MIN, ), ( ( @@ -81,16 +411,13 @@ fn test_least_supertype() { ( ( TypeSignature::NoType, - TypeSignature::bound_string_ascii_type(17).unwrap(), + TypeSignature::new_ascii_type_checked(17), ), - TypeSignature::bound_string_ascii_type(17).unwrap(), + TypeSignature::new_ascii_type_checked(17), ), ( - ( - TypeSignature::NoType, - TypeSignature::max_string_utf8().unwrap(), - ), - TypeSignature::max_string_utf8().unwrap(), + (TypeSignature::NoType, TypeSignature::STRING_UTF8_MAX), + TypeSignature::STRING_UTF8_MAX, ), ( (TypeSignature::NoType, TypeSignature::PrincipalType), @@ -169,11 +496,8 @@ fn test_least_supertype() { ((UIntType, UIntType), UIntType), ((BoolType, BoolType), BoolType), ( - ( - TypeSignature::max_buffer().unwrap(), - TypeSignature::max_buffer().unwrap(), - ), - TypeSignature::max_buffer().unwrap(), + (TypeSignature::BUFFER_MAX, TypeSignature::BUFFER_MAX), + TypeSignature::BUFFER_MAX, ), ( ( @@ -184,17 +508,17 @@ fn test_least_supertype() { ), ( ( - TypeSignature::bound_string_ascii_type(17).unwrap(), - TypeSignature::bound_string_ascii_type(17).unwrap(), + TypeSignature::new_ascii_type_checked(17), + TypeSignature::new_ascii_type_checked(17), ), - TypeSignature::bound_string_ascii_type(17).unwrap(), + TypeSignature::new_ascii_type_checked(17), ), ( ( - TypeSignature::max_string_utf8().unwrap(), - TypeSignature::max_string_utf8().unwrap(), + TypeSignature::STRING_UTF8_MAX, + TypeSignature::STRING_UTF8_MAX, ), - TypeSignature::max_string_utf8().unwrap(), + TypeSignature::STRING_UTF8_MAX, ), ( (TypeSignature::PrincipalType, TypeSignature::PrincipalType), @@ -276,11 +600,8 @@ fn test_least_supertype() { let matched_pairs = [ ( - ( - TypeSignature::max_buffer().unwrap(), - TypeSignature::min_buffer().unwrap(), - ), - TypeSignature::max_buffer().unwrap(), + (TypeSignature::BUFFER_MAX, TypeSignature::BUFFER_MIN), + TypeSignature::BUFFER_MAX, ), ( ( @@ -291,17 +612,17 @@ fn test_least_supertype() { ), ( ( - TypeSignature::min_string_ascii().unwrap(), - TypeSignature::bound_string_ascii_type(17).unwrap(), + TypeSignature::STRING_ASCII_MIN, + 
TypeSignature::new_ascii_type_checked(17), ), - TypeSignature::bound_string_ascii_type(17).unwrap(), + TypeSignature::new_ascii_type_checked(17), ), ( ( - TypeSignature::min_string_utf8().unwrap(), - TypeSignature::max_string_utf8().unwrap(), + TypeSignature::STRING_UTF8_MIN, + TypeSignature::STRING_UTF8_MAX, ), - TypeSignature::max_string_utf8().unwrap(), + TypeSignature::STRING_UTF8_MAX, ), ( ( @@ -354,7 +675,7 @@ fn test_least_supertype() { 5, ) .unwrap(), - TypeSignature::list_of(TypeSignature::min_buffer().unwrap(), 3).unwrap(), + TypeSignature::list_of(TypeSignature::BUFFER_MIN, 3).unwrap(), ), TypeSignature::list_of( TypeSignature::SequenceType(SequenceSubtype::BufferType( @@ -369,14 +690,14 @@ fn test_least_supertype() { TypeSignature::TupleType( TupleTypeSignature::try_from(vec![( "b".into(), - TypeSignature::min_string_ascii().unwrap(), + TypeSignature::STRING_ASCII_MIN, )]) .unwrap(), ), TypeSignature::TupleType( TupleTypeSignature::try_from(vec![( "b".into(), - TypeSignature::bound_string_ascii_type(17).unwrap(), + TypeSignature::new_ascii_type_checked(17), )]) .unwrap(), ), @@ -384,18 +705,17 @@ fn test_least_supertype() { TypeSignature::TupleType( TupleTypeSignature::try_from(vec![( "b".into(), - TypeSignature::bound_string_ascii_type(17).unwrap(), + TypeSignature::new_ascii_type_checked(17), )]) .unwrap(), ), ), ( ( - TypeSignature::new_option(TypeSignature::min_string_ascii().unwrap()).unwrap(), - TypeSignature::new_option(TypeSignature::bound_string_ascii_type(17).unwrap()) - .unwrap(), + TypeSignature::new_option(TypeSignature::STRING_ASCII_MIN).unwrap(), + TypeSignature::new_option(TypeSignature::new_ascii_type_checked(17)).unwrap(), ), - TypeSignature::new_option(TypeSignature::bound_string_ascii_type(17).unwrap()).unwrap(), + TypeSignature::new_option(TypeSignature::new_ascii_type_checked(17)).unwrap(), ), ( ( @@ -427,22 +747,16 @@ fn test_least_supertype() { let bad_pairs = [ (IntType, UIntType), (BoolType, IntType), - ( - TypeSignature::max_buffer().unwrap(), - TypeSignature::max_string_ascii().unwrap(), - ), + (TypeSignature::BUFFER_MAX, TypeSignature::STRING_ASCII_MAX), ( TypeSignature::list_of(TypeSignature::UIntType, 42).unwrap(), TypeSignature::list_of(TypeSignature::IntType, 42).unwrap(), ), ( - TypeSignature::min_string_utf8().unwrap(), - TypeSignature::bound_string_ascii_type(17).unwrap(), - ), - ( - TypeSignature::min_string_utf8().unwrap(), - TypeSignature::min_buffer().unwrap(), + TypeSignature::STRING_UTF8_MIN, + TypeSignature::new_ascii_type_checked(17), ), + (TypeSignature::STRING_UTF8_MIN, TypeSignature::BUFFER_MIN), ( TypeSignature::TupleType( TupleTypeSignature::try_from(vec![("a".into(), TypeSignature::IntType)]).unwrap(), @@ -453,7 +767,7 @@ fn test_least_supertype() { ), ( TypeSignature::new_option(TypeSignature::IntType).unwrap(), - TypeSignature::new_option(TypeSignature::min_string_utf8().unwrap()).unwrap(), + TypeSignature::new_option(TypeSignature::STRING_UTF8_MIN).unwrap(), ), ( TypeSignature::new_response(TypeSignature::IntType, TypeSignature::BoolType).unwrap(), @@ -473,10 +787,7 @@ fn test_least_supertype() { TypeSignature::PrincipalType, ), (list_union.clone(), TypeSignature::PrincipalType), - ( - TypeSignature::min_string_ascii().unwrap(), - list_union_principals, - ), + (TypeSignature::STRING_ASCII_MIN, list_union_principals), ( TypeSignature::list_of( TypeSignature::SequenceType(SequenceSubtype::BufferType( @@ -485,23 +796,20 @@ fn test_least_supertype() { 5, ) .unwrap(), - 
TypeSignature::list_of(TypeSignature::min_string_ascii().unwrap(), 3).unwrap(),
+            TypeSignature::list_of(TypeSignature::STRING_ASCII_MIN, 3).unwrap(),
         ),
         (
             TypeSignature::TupleType(
-                TupleTypeSignature::try_from(vec![(
-                    "b".into(),
-                    TypeSignature::min_string_ascii().unwrap(),
-                )])
-                .unwrap(),
+                TupleTypeSignature::try_from(vec![("b".into(), TypeSignature::STRING_ASCII_MIN)])
+                    .unwrap(),
             ),
             TypeSignature::TupleType(
                 TupleTypeSignature::try_from(vec![("b".into(), TypeSignature::UIntType)]).unwrap(),
             ),
         ),
         (
-            TypeSignature::new_option(TypeSignature::min_string_ascii().unwrap()).unwrap(),
-            TypeSignature::new_option(TypeSignature::min_string_utf8().unwrap()).unwrap(),
+            TypeSignature::new_option(TypeSignature::STRING_ASCII_MIN).unwrap(),
+            TypeSignature::new_option(TypeSignature::STRING_UTF8_MIN).unwrap(),
         ),
         (
             TypeSignature::new_response(TypeSignature::PrincipalType, list_union).unwrap(),
@@ -526,14 +834,3 @@ fn test_least_supertype() {
         );
     }
 }
-
-#[test]
-fn test_type_signature_bound_string_ascii_type_returns_check_errors() {
-    let err = TypeSignature::bound_string_ascii_type(MAX_VALUE_SIZE + 1).unwrap_err();
-    assert_eq!(
-        CheckErrorKind::Expects(
-            "FAIL: Max Clarity Value Size is no longer realizable in ASCII Type".to_string()
-        ),
-        err
-    );
-}
diff --git a/clarity-types/src/types/mod.rs b/clarity-types/src/types/mod.rs
index d4f277b290a..42ff94e4a9c 100644
--- a/clarity-types/src/types/mod.rs
+++ b/clarity-types/src/types/mod.rs
@@ -33,20 +33,29 @@ use stacks_common::types::chainstate::StacksPrivateKey;
 use stacks_common::util::hash;
 
 pub use self::signatures::{
-    AssetIdentifier, BUFF_1, BUFF_20, BUFF_21, BUFF_32, BUFF_33, BUFF_64, BUFF_65, BufferLength,
-    ListTypeData, SequenceSubtype, StringSubtype, StringUTF8Length, TupleTypeSignature,
-    TypeSignature,
+    AssetIdentifier, BufferLength, ListTypeData, SequenceSubtype, StringSubtype, StringUTF8Length,
+    TupleTypeSignature, TypeSignature,
 };
 use crate::errors::{CheckErrorKind, InterpreterResult as Result, RuntimeError, VmInternalError};
 use crate::representations::{ClarityName, ContractName, SymbolicExpression};
-// use crate::vm::ClarityVersion;
 
+/// Maximum size in bytes allowed for Clarity values.
 pub const MAX_VALUE_SIZE: u32 = 1024 * 1024; // 1MB
+/// Upper limit on the serialized size of a value, in bytes.
 pub const BOUND_VALUE_SERIALIZATION_BYTES: u32 = MAX_VALUE_SIZE * 2;
+/// Upper limit on the serialized size of a value, in hex characters.
 pub const BOUND_VALUE_SERIALIZATION_HEX: u32 = BOUND_VALUE_SERIALIZATION_BYTES * 2;
-
+/// Maximum length for UTF-8 strings.
+pub const MAX_UTF8_VALUE_SIZE: u32 = MAX_VALUE_SIZE / 4;
+/// Maximum string length returned from `to-ascii?`.
+/// 5 bytes are reserved for embedding the string in a response.
+pub const MAX_TO_ASCII_RESULT_LEN: u32 = MAX_VALUE_SIZE - 5;
+/// Maximum buffer length accepted by `to-ascii?`.
+/// 2 characters are reserved for the "0x" prefix, and each byte takes 2 characters.
+pub const MAX_TO_ASCII_BUFFER_LEN: u32 = (MAX_TO_ASCII_RESULT_LEN - 2) / 2;
+/// Maximum allowed nesting depth of types.
 pub const MAX_TYPE_DEPTH: u8 = 32;
-// this is the charged size for wrapped values, i.e., response or optionals
+/// This is the charged size for wrapped values, i.e., responses or optionals.
 pub const WRAPPER_VALUE_SIZE: u32 = 1;
 
 #[derive(Debug, Clone, Eq, Serialize, Deserialize)]
@@ -342,10 +351,10 @@ impl SequenceData {
     pub fn element_size(&self) -> Result<u32> {
         let out = match self {
-            SequenceData::Buffer(..) => TypeSignature::min_buffer()?.size(),
+            SequenceData::Buffer(..) => TypeSignature::BUFFER_MIN.size(),
             SequenceData::List(data) => data.type_signature.get_list_item_type().size(),
-            SequenceData::String(CharType::ASCII(..)) => TypeSignature::min_string_ascii()?.size(),
-            SequenceData::String(CharType::UTF8(..)) => TypeSignature::min_string_utf8()?.size(),
+            SequenceData::String(CharType::ASCII(..)) => TypeSignature::STRING_ASCII_MIN.size(),
+            SequenceData::String(CharType::UTF8(..)) => TypeSignature::STRING_UTF8_MIN.size(),
         }?;
         Ok(out)
     }
@@ -455,7 +464,7 @@ impl SequenceData {
             }
         } else {
             Err(CheckErrorKind::TypeValueError(
-                Box::new(TypeSignature::min_buffer()?),
+                Box::new(TypeSignature::BUFFER_MIN),
                 Box::new(to_find),
             )
             .into())
@@ -484,7 +493,7 @@ impl SequenceData {
             }
         } else {
             Err(CheckErrorKind::TypeValueError(
-                Box::new(TypeSignature::min_string_ascii()?),
+                Box::new(TypeSignature::STRING_ASCII_MIN),
                 Box::new(to_find),
             )
             .into())
@@ -505,7 +514,7 @@ impl SequenceData {
             }
         } else {
             Err(CheckErrorKind::TypeValueError(
-                Box::new(TypeSignature::min_string_utf8()?),
+                Box::new(TypeSignature::STRING_UTF8_MIN),
                 Box::new(to_find),
             )
             .into())
diff --git a/clarity-types/src/types/signatures.rs b/clarity-types/src/types/signatures.rs
index 3fd0ee57cb4..1ff667023de 100644
--- a/clarity-types/src/types/signatures.rs
+++ b/clarity-types/src/types/signatures.rs
@@ -19,16 +19,15 @@ use std::hash::Hash;
 use std::sync::Arc;
 use std::{cmp, fmt};
 
-use lazy_static::lazy_static;
 use serde::{Deserialize, Serialize};
 use stacks_common::types::StacksEpochId;
 
 use crate::errors::CheckErrorKind;
 use crate::representations::{CONTRACT_MAX_NAME_LENGTH, ClarityName, ContractName};
 use crate::types::{
-    CharType, MAX_TYPE_DEPTH, MAX_VALUE_SIZE, PrincipalData, QualifiedContractIdentifier,
-    SequenceData, SequencedValue, StandardPrincipalData, TraitIdentifier, Value,
-    WRAPPER_VALUE_SIZE,
+    CharType, MAX_TO_ASCII_BUFFER_LEN, MAX_TO_ASCII_RESULT_LEN, MAX_TYPE_DEPTH,
+    MAX_UTF8_VALUE_SIZE, MAX_VALUE_SIZE, PrincipalData, QualifiedContractIdentifier, SequenceData,
+    SequencedValue, StandardPrincipalData, TraitIdentifier, Value, WRAPPER_VALUE_SIZE,
 };
 
 #[derive(Debug, Clone, PartialEq, PartialOrd, Eq, Serialize, Deserialize, Hash)]
@@ -104,9 +103,165 @@ mod tuple_type_map_serde {
 #[derive(Debug, Clone, PartialEq, Eq, PartialOrd, Ord, Serialize, Deserialize)]
 pub struct BufferLength(u32);
 
+impl BufferLength {
+    /// Attempts to create a [`BufferLength`] from a [`u32`] as an [`Option`].
+    ///
+    /// This function is primarily intended for internal use when defining
+    /// `const` values, since it returns an [`Option`] that can be unwrapped
+    /// with [`Option::expect`] in a `const fn`.
+    const fn try_from_u32_as_opt(value: u32) -> Option<Self> {
+        if value > MAX_VALUE_SIZE {
+            None
+        } else {
+            Some(BufferLength(value))
+        }
+    }
+
+    /// Attempts to create a [`BufferLength`] from an [`i128`] as a [`Result`].
+    ///
+    /// This function is primarily intended for internal runtime use,
+    /// and serves as the central place for all integer validation logic.
+    fn try_from_i128(data: i128) -> Result<Self, CheckErrorKind> {
+        if data > (MAX_VALUE_SIZE as i128) {
+            Err(CheckErrorKind::ValueTooLarge)
+        } else if data < 0 {
+            Err(CheckErrorKind::ValueOutOfBounds)
+        } else {
+            Ok(BufferLength(data as u32))
+        }
+    }
+}
+
+/// Test-only utilities for [`BufferLength`].
+#[cfg(test)]
+impl BufferLength {
+    /// Creates a [`BufferLength`] without validating the length,
+    /// giving tests direct write access to its internal state.
+    pub fn new_unsafe(value: u32) -> Self {
+        Self(value)
+    }
+
+    /// Returns the underlying [`u32`] value of this [`BufferLength`],
+    /// giving tests easy read access to its internal state.
+    pub fn get_value(&self) -> u32 {
+        self.0
+    }
+}
+
+impl From<&BufferLength> for u32 {
+    fn from(v: &BufferLength) -> u32 {
+        v.0
+    }
+}
+
+impl From<BufferLength> for u32 {
+    fn from(v: BufferLength) -> u32 {
+        v.0
+    }
+}
+
+impl TryFrom<u32> for BufferLength {
+    type Error = CheckErrorKind;
+    fn try_from(data: u32) -> Result<Self, Self::Error> {
+        Self::try_from(data as i128)
+    }
+}
+
+impl TryFrom<usize> for BufferLength {
+    type Error = CheckErrorKind;
+    fn try_from(data: usize) -> Result<Self, Self::Error> {
+        Self::try_from(data as i128)
+    }
+}
+
+impl TryFrom<i128> for BufferLength {
+    type Error = CheckErrorKind;
+    fn try_from(data: i128) -> Result<Self, Self::Error> {
+        Self::try_from_i128(data)
+    }
+}
+
 #[derive(Debug, Clone, PartialEq, Eq, Serialize, Deserialize)]
 pub struct StringUTF8Length(u32);
 
+impl StringUTF8Length {
+    /// Attempts to create a [`StringUTF8Length`] from a [`u32`] as an [`Option`].
+    ///
+    /// This function is primarily intended for internal use when defining
+    /// `const` values, since it returns an [`Option`] that can be unwrapped
+    /// with [`Option::expect`] in a `const fn`.
+    const fn try_from_u32_as_opt(value: u32) -> Option<Self> {
+        if value > MAX_UTF8_VALUE_SIZE {
+            None
+        } else {
+            Some(StringUTF8Length(value))
+        }
+    }
+
+    /// Attempts to create a [`StringUTF8Length`] from an [`i128`] as a [`Result`].
+    ///
+    /// This function is primarily intended for internal runtime use,
+    /// and serves as the central place for all integer validation logic.
+    fn try_from_i128(value: i128) -> Result<Self, CheckErrorKind> {
+        if value > MAX_UTF8_VALUE_SIZE as i128 {
+            Err(CheckErrorKind::ValueTooLarge)
+        } else if value < 0 {
+            Err(CheckErrorKind::ValueOutOfBounds)
+        } else {
+            Ok(StringUTF8Length(value as u32))
+        }
+    }
+}
+
+/// Test-only utilities for [`StringUTF8Length`].
+#[cfg(test)]
+impl StringUTF8Length {
+    /// Creates a [`StringUTF8Length`] without validating the length,
+    /// giving tests direct write access to its internal state.
+    pub fn new_unsafe(value: u32) -> Self {
+        Self(value)
+    }
+
+    /// Returns the underlying [`u32`] value of this [`StringUTF8Length`],
+    /// giving tests easy read access to its internal state.
+    pub fn get_value(&self) -> u32 {
+        self.0
+    }
+}
+
+impl From<&StringUTF8Length> for u32 {
+    fn from(v: &StringUTF8Length) -> u32 {
+        v.0
+    }
+}
+
+impl From<StringUTF8Length> for u32 {
+    fn from(v: StringUTF8Length) -> u32 {
+        v.0
+    }
+}
+
+impl TryFrom<u32> for StringUTF8Length {
+    type Error = CheckErrorKind;
+    fn try_from(data: u32) -> Result<Self, Self::Error> {
+        Self::try_from(data as i128)
+    }
+}
+
+impl TryFrom<usize> for StringUTF8Length {
+    type Error = CheckErrorKind;
+    fn try_from(data: usize) -> Result<Self, Self::Error> {
+        Self::try_from(data as i128)
+    }
+}
+
+impl TryFrom<i128> for StringUTF8Length {
+    type Error = CheckErrorKind;
+    fn try_from(data: i128) -> Result<Self, Self::Error> {
+        Self::try_from_i128(data)
+    }
+}
+
 // INVARIANTS enforced by the Type Signatures.
 //   1. A TypeSignature constructor will always fail rather than construct a
 //      type signature for a too large or invalid type.
This is why any variable length @@ -150,14 +305,12 @@ pub enum SequenceSubtype { } impl SequenceSubtype { - pub fn unit_type(&self) -> Result { + pub fn unit_type(&self) -> TypeSignature { match &self { - SequenceSubtype::ListType(list_data) => Ok(list_data.clone().destruct().0), - SequenceSubtype::BufferType(_) => TypeSignature::min_buffer(), - SequenceSubtype::StringType(StringSubtype::ASCII(_)) => { - TypeSignature::min_string_ascii() - } - SequenceSubtype::StringType(StringSubtype::UTF8(_)) => TypeSignature::min_string_utf8(), + SequenceSubtype::ListType(list_data) => list_data.clone().destruct().0, + SequenceSubtype::BufferType(_) => TypeSignature::BUFFER_MIN, + SequenceSubtype::StringType(StringSubtype::ASCII(_)) => TypeSignature::STRING_ASCII_MIN, + SequenceSubtype::StringType(StringSubtype::UTF8(_)) => TypeSignature::STRING_UTF8_MIN, } } @@ -183,88 +336,6 @@ use self::TypeSignature::{ ResponseType, SequenceType, TraitReferenceType, TupleType, UIntType, }; -/// Maximum string length returned from `to-ascii?`. -/// 5 bytes reserved for embedding in response. -const MAX_TO_ASCII_RESULT_LEN: u32 = MAX_VALUE_SIZE - 5; - -/// Maximum buffer length returned from `to-ascii?`. -/// 2 bytes reserved for "0x" prefix and 2 characters per byte. -pub const MAX_TO_ASCII_BUFFER_LEN: u32 = (MAX_TO_ASCII_RESULT_LEN - 2) / 2; - -lazy_static! { - pub static ref BUFF_64: TypeSignature = { - #[allow(clippy::expect_used)] - SequenceType(SequenceSubtype::BufferType( - BufferLength::try_from(64u32).expect("BUG: Legal Clarity buffer length marked invalid"), - )) - }; - pub static ref BUFF_65: TypeSignature = { - #[allow(clippy::expect_used)] - SequenceType(SequenceSubtype::BufferType( - BufferLength::try_from(65u32).expect("BUG: Legal Clarity buffer length marked invalid"), - )) - }; - pub static ref BUFF_32: TypeSignature = { - #[allow(clippy::expect_used)] - SequenceType(SequenceSubtype::BufferType( - BufferLength::try_from(32u32).expect("BUG: Legal Clarity buffer length marked invalid"), - )) - }; - pub static ref BUFF_33: TypeSignature = { - #[allow(clippy::expect_used)] - SequenceType(SequenceSubtype::BufferType( - BufferLength::try_from(33u32).expect("BUG: Legal Clarity buffer length marked invalid"), - )) - }; - pub static ref BUFF_20: TypeSignature = { - #[allow(clippy::expect_used)] - SequenceType(SequenceSubtype::BufferType( - BufferLength::try_from(20u32).expect("BUG: Legal Clarity buffer length marked invalid"), - )) - }; - pub static ref BUFF_21: TypeSignature = { - #[allow(clippy::expect_used)] - SequenceType(SequenceSubtype::BufferType( - BufferLength::try_from(21u32).expect("BUG: Legal Clarity buffer length marked invalid"), - )) - }; - pub static ref BUFF_1: TypeSignature = { - #[allow(clippy::expect_used)] - SequenceType(SequenceSubtype::BufferType( - BufferLength::try_from(1u32).expect("BUG: Legal Clarity buffer length marked invalid"), - )) - }; - pub static ref BUFF_16: TypeSignature = { - #[allow(clippy::expect_used)] - SequenceType(SequenceSubtype::BufferType( - BufferLength::try_from(16u32).expect("BUG: Legal Clarity buffer length marked invalid"), - )) - }; - /// Maximum-sized buffer allowed for `to-ascii?` call. 
- pub static ref TO_ASCII_MAX_BUFF: TypeSignature = { - #[allow(clippy::expect_used)] - SequenceType(SequenceSubtype::BufferType( - BufferLength::try_from(MAX_TO_ASCII_BUFFER_LEN) - .expect("BUG: Legal Clarity buffer length marked invalid"), - )) - }; - /// Maximum-length string returned from `to-ascii?` - pub static ref TO_ASCII_RESPONSE_STRING: TypeSignature = { - #[allow(clippy::expect_used)] - SequenceType(SequenceSubtype::StringType( - StringSubtype::ASCII(BufferLength::try_from(MAX_TO_ASCII_RESULT_LEN) - .expect("BUG: Legal Clarity buffer length marked invalid")), - )) - }; -} - -pub const ASCII_40: TypeSignature = SequenceType(SequenceSubtype::StringType( - StringSubtype::ASCII(BufferLength(40)), -)); -pub const UTF8_40: TypeSignature = SequenceType(SequenceSubtype::StringType(StringSubtype::UTF8( - StringUTF8Length(40), -))); - #[derive(Debug, Clone, PartialEq, Eq, Serialize, Deserialize)] pub struct ListTypeData { max_len: u32, @@ -283,109 +354,6 @@ impl From for TypeSignature { } } -impl From<&BufferLength> for u32 { - fn from(v: &BufferLength) -> u32 { - v.0 - } -} - -impl From for u32 { - fn from(v: BufferLength) -> u32 { - v.0 - } -} - -impl TryFrom for BufferLength { - type Error = CheckErrorKind; - fn try_from(data: u32) -> Result { - if data > MAX_VALUE_SIZE { - Err(CheckErrorKind::ValueTooLarge) - } else { - Ok(BufferLength(data)) - } - } -} - -impl TryFrom for BufferLength { - type Error = CheckErrorKind; - fn try_from(data: usize) -> Result { - if data > (MAX_VALUE_SIZE as usize) { - Err(CheckErrorKind::ValueTooLarge) - } else { - Ok(BufferLength(data as u32)) - } - } -} - -impl TryFrom for BufferLength { - type Error = CheckErrorKind; - fn try_from(data: i128) -> Result { - if data > (MAX_VALUE_SIZE as i128) { - Err(CheckErrorKind::ValueTooLarge) - } else if data < 0 { - Err(CheckErrorKind::ValueOutOfBounds) - } else { - Ok(BufferLength(data as u32)) - } - } -} - -impl From<&StringUTF8Length> for u32 { - fn from(v: &StringUTF8Length) -> u32 { - v.0 - } -} - -impl From for u32 { - fn from(v: StringUTF8Length) -> u32 { - v.0 - } -} - -impl TryFrom for StringUTF8Length { - type Error = CheckErrorKind; - fn try_from(data: u32) -> Result { - let len = data - .checked_mul(4) - .ok_or_else(|| CheckErrorKind::ValueTooLarge)?; - if len > MAX_VALUE_SIZE { - Err(CheckErrorKind::ValueTooLarge) - } else { - Ok(StringUTF8Length(data)) - } - } -} - -impl TryFrom for StringUTF8Length { - type Error = CheckErrorKind; - fn try_from(data: usize) -> Result { - let len = data - .checked_mul(4) - .ok_or_else(|| CheckErrorKind::ValueTooLarge)?; - if len > (MAX_VALUE_SIZE as usize) { - Err(CheckErrorKind::ValueTooLarge) - } else { - Ok(StringUTF8Length(data as u32)) - } - } -} - -impl TryFrom for StringUTF8Length { - type Error = CheckErrorKind; - fn try_from(data: i128) -> Result { - let len = data - .checked_mul(4) - .ok_or_else(|| CheckErrorKind::ValueTooLarge)?; - if len > (MAX_VALUE_SIZE as i128) { - Err(CheckErrorKind::ValueTooLarge) - } else if data < 0 { - Err(CheckErrorKind::ValueOutOfBounds) - } else { - Ok(StringUTF8Length(data as u32)) - } - } -} - impl ListTypeData { pub fn new_list( entry_type: TypeSignature, @@ -460,20 +428,6 @@ impl TypeSignature { } } - pub fn new_string_ascii(len: usize) -> Result { - let len = BufferLength::try_from(len)?; - Ok(TypeSignature::SequenceType(SequenceSubtype::StringType( - StringSubtype::ASCII(len), - ))) - } - - pub fn new_string_utf8(len: usize) -> Result { - let len = StringUTF8Length::try_from(len)?; - 
Ok(TypeSignature::SequenceType(SequenceSubtype::StringType( - StringSubtype::UTF8(len), - ))) - } - pub fn is_response_type(&self) -> bool { matches!(self, TypeSignature::ResponseType(_)) } @@ -883,82 +837,85 @@ impl TupleTypeSignature { } impl TypeSignature { - pub fn empty_buffer() -> Result { - Ok(SequenceType(SequenceSubtype::BufferType( - 0_u32.try_into().map_err(|_| { - CheckErrorKind::Expects("FAIL: Empty clarity value size is not realizable".into()) - })?, - ))) - } - - pub fn min_buffer() -> Result { - Ok(SequenceType(SequenceSubtype::BufferType( - 1_u32.try_into().map_err(|_| { - CheckErrorKind::Expects("FAIL: Min clarity value size is not realizable".into()) - })?, - ))) - } - - pub fn min_string_ascii() -> Result { - Ok(SequenceType(SequenceSubtype::StringType( - StringSubtype::ASCII(1_u32.try_into().map_err(|_| { - CheckErrorKind::Expects("FAIL: Min clarity value size is not realizable".into()) - })?), - ))) - } - - pub fn min_string_utf8() -> Result { - Ok(SequenceType(SequenceSubtype::StringType( - StringSubtype::UTF8(1_u32.try_into().map_err(|_| { - CheckErrorKind::Expects("FAIL: Min clarity value size is not realizable".into()) - })?), - ))) - } - - pub fn max_string_ascii() -> Result { - Ok(SequenceType(SequenceSubtype::StringType( - StringSubtype::ASCII(BufferLength::try_from(MAX_VALUE_SIZE).map_err(|_| { - CheckErrorKind::Expects( - "FAIL: Max Clarity Value Size is no longer realizable in ASCII Type".into(), - ) - })?), - ))) + /// Buffer type with minimum length. Alias for [`TypeSignature::BUFFER_1`]. + pub const BUFFER_MIN: TypeSignature = TypeSignature::BUFFER_1; + /// Buffer type with maximum length ([`MAX_VALUE_SIZE`]). + pub const BUFFER_MAX: TypeSignature = Self::type_buffer_const(MAX_VALUE_SIZE); + /// Buffer type with length 1. + pub const BUFFER_1: TypeSignature = Self::type_buffer_const(1); + /// Buffer type with length 20. + pub const BUFFER_20: TypeSignature = Self::type_buffer_const(20); + /// Buffer type with length 32. + pub const BUFFER_32: TypeSignature = Self::type_buffer_const(32); + /// Buffer type with length 33. + pub const BUFFER_33: TypeSignature = Self::type_buffer_const(33); + /// Buffer type with length 64. + pub const BUFFER_64: TypeSignature = Self::type_buffer_const(64); + /// Buffer type with length 65. + pub const BUFFER_65: TypeSignature = Self::type_buffer_const(65); + + /// String ASCII type with minimum length (`1`). + pub const STRING_ASCII_MIN: TypeSignature = Self::type_ascii_const(1); + /// String ASCII type with maximum length ([`MAX_VALUE_SIZE`]). + pub const STRING_ASCII_MAX: TypeSignature = Self::type_ascii_const(MAX_VALUE_SIZE); + /// String ASCII type with length 40. + pub const STRING_ASCII_40: TypeSignature = Self::type_ascii_const(40); + + /// String UTF8 type with minimum length (`1`). + pub const STRING_UTF8_MIN: TypeSignature = Self::type_string_utf8(1); + /// String UTF8 type with maximum length ([`MAX_UTF8_VALUE_SIZE`]). + pub const STRING_UTF8_MAX: TypeSignature = Self::type_string_utf8(MAX_UTF8_VALUE_SIZE); + /// String UTF8 type with length 40. + pub const STRING_UTF8_40: TypeSignature = Self::type_string_utf8(40); + + /// Longest ([`MAX_TO_ASCII_BUFFER_LEN`]) buffer allowed for `to-ascii?` call. + pub const TO_ASCII_BUFFER_MAX: TypeSignature = Self::type_buffer_const(MAX_TO_ASCII_BUFFER_LEN); + /// Longest ([`MAX_TO_ASCII_RESULT_LEN`]) string allowed for `to-ascii?` call. 
+ pub const TO_ASCII_STRING_ASCII_MAX: TypeSignature = + Self::type_ascii_const(MAX_TO_ASCII_RESULT_LEN); + + /// Longest ([`CONTRACT_MAX_NAME_LENGTH`]) string allowed for `contract-name`. + pub const CONTRACT_NAME_STRING_ASCII_MAX: TypeSignature = + Self::type_ascii_const(CONTRACT_MAX_NAME_LENGTH as u32); + + /// Creates a buffer type with the specified length. + /// + /// # Note + /// This function is intended for use in constant contexts or for testing purposes. + /// It may panic if the provided length is invalid. + const fn type_buffer_const(len: u32) -> Self { + SequenceType(SequenceSubtype::BufferType( + BufferLength::try_from_u32_as_opt(len).expect("Invalid buffer length!"), + )) } - pub fn max_string_utf8() -> Result { - Ok(SequenceType(SequenceSubtype::StringType( - StringSubtype::UTF8(StringUTF8Length::try_from(MAX_VALUE_SIZE / 4).map_err(|_| { - CheckErrorKind::Expects( - "FAIL: Max Clarity Value Size is no longer realizable in UTF8 Type".into(), - ) - })?), + /// Creates a string ASCII type with the specified length. + /// + /// # Note + /// This function is intended for use in constant contexts or for testing purposes. + /// It may panic if the provided length is invalid. + const fn type_ascii_const(len: u32) -> Self { + SequenceType(SequenceSubtype::StringType(StringSubtype::ASCII( + BufferLength::try_from_u32_as_opt(len).expect("Invalid ascii length!"), ))) } - pub fn max_buffer() -> Result { - Ok(SequenceType(SequenceSubtype::BufferType( - BufferLength::try_from(MAX_VALUE_SIZE).map_err(|_| { - CheckErrorKind::Expects( - "FAIL: Max Clarity Value Size is no longer realizable in Buffer Type".into(), - ) - })?, + /// Creates a string UTF8 type with the specified length. + /// + /// # Note + /// This function is intended for use in constant contexts or for testing purposes. + /// It may panic if the provided length is invalid. + const fn type_string_utf8(len: u32) -> Self { + SequenceType(SequenceSubtype::StringType(StringSubtype::UTF8( + StringUTF8Length::try_from_u32_as_opt(len).expect("Invalid utf8 length!"), ))) } - pub fn contract_name_string_ascii_type() -> Result { - TypeSignature::bound_string_ascii_type(CONTRACT_MAX_NAME_LENGTH.try_into().map_err( - |_| CheckErrorKind::Expects("FAIL: contract name max length exceeds u32 space".into()), - )?) - } - - pub fn bound_string_ascii_type(max_len: u32) -> Result { - Ok(SequenceType(SequenceSubtype::StringType( - StringSubtype::ASCII(BufferLength::try_from(max_len).map_err(|_| { - CheckErrorKind::Expects( - "FAIL: Max Clarity Value Size is no longer realizable in ASCII Type".into(), - ) - })?), - ))) + /// Creates a string ASCII type with the specified length. + /// It may panic if the provided length is invalid. 
+ #[cfg(test)] + pub const fn new_ascii_type_checked(len: u32) -> Self { + Self::type_ascii_const(len) } /// If one of the types is a NoType, return Ok(the other type), otherwise return least_supertype(a, b) diff --git a/clarity/src/vm/analysis/type_checker/v2_05/natives/mod.rs b/clarity/src/vm/analysis/type_checker/v2_05/natives/mod.rs index 45ad3f529e4..4a0b5a65338 100644 --- a/clarity/src/vm/analysis/type_checker/v2_05/natives/mod.rs +++ b/clarity/src/vm/analysis/type_checker/v2_05/natives/mod.rs @@ -24,7 +24,7 @@ use crate::vm::diagnostic::DiagnosableError; use crate::vm::functions::{handle_binding_list, NativeFunctions}; use crate::vm::types::{ BlockInfoProperty, FixedFunction, FunctionArg, FunctionSignature, FunctionType, PrincipalData, - TupleTypeSignature, TypeSignature, Value, BUFF_20, BUFF_32, BUFF_33, BUFF_64, BUFF_65, + TupleTypeSignature, TypeSignature, Value, }; use crate::vm::{ClarityName, ClarityVersion, SymbolicExpression, SymbolicExpressionType}; @@ -90,7 +90,7 @@ fn check_special_at_block( context: &TypingContext, ) -> Result { check_argument_count(2, args)?; - checker.type_check_expects(&args[0], context, &BUFF_32)?; + checker.type_check_expects(&args[0], context, &TypeSignature::BUFFER_32)?; checker.type_check(&args[1], context) } @@ -492,7 +492,7 @@ fn check_principal_of( context: &TypingContext, ) -> Result { check_argument_count(1, args)?; - checker.type_check_expects(&args[0], context, &BUFF_33)?; + checker.type_check_expects(&args[0], context, &TypeSignature::BUFFER_33)?; Ok( TypeSignature::new_response(TypeSignature::PrincipalType, TypeSignature::UIntType) .map_err(|_| CheckErrorKind::Expects("Bad constructor".into()))?, @@ -505,10 +505,10 @@ fn check_secp256k1_recover( context: &TypingContext, ) -> Result { check_argument_count(2, args)?; - checker.type_check_expects(&args[0], context, &BUFF_32)?; - checker.type_check_expects(&args[1], context, &BUFF_65)?; + checker.type_check_expects(&args[0], context, &TypeSignature::BUFFER_32)?; + checker.type_check_expects(&args[1], context, &TypeSignature::BUFFER_65)?; Ok( - TypeSignature::new_response(BUFF_33.clone(), TypeSignature::UIntType) + TypeSignature::new_response(TypeSignature::BUFFER_33, TypeSignature::UIntType) .map_err(|_| CheckErrorKind::Expects("Bad constructor".into()))?, ) } @@ -519,9 +519,9 @@ fn check_secp256k1_verify( context: &TypingContext, ) -> Result { check_argument_count(3, args)?; - checker.type_check_expects(&args[0], context, &BUFF_32)?; - checker.type_check_expects(&args[1], context, &BUFF_65)?; - checker.type_check_expects(&args[2], context, &BUFF_33)?; + checker.type_check_expects(&args[0], context, &TypeSignature::BUFFER_32)?; + checker.type_check_expects(&args[1], context, &TypeSignature::BUFFER_65)?; + checker.type_check_expects(&args[2], context, &TypeSignature::BUFFER_33)?; Ok(TypeSignature::BoolType) } @@ -620,43 +620,43 @@ impl TypedNativeFunction { }))), Hash160 => Simple(SimpleNativeFunction(FunctionType::UnionArgs( vec![ - TypeSignature::max_buffer()?, + TypeSignature::BUFFER_MAX, TypeSignature::UIntType, TypeSignature::IntType, ], - BUFF_20.clone(), + TypeSignature::BUFFER_20, ))), Sha256 => Simple(SimpleNativeFunction(FunctionType::UnionArgs( vec![ - TypeSignature::max_buffer()?, + TypeSignature::BUFFER_MAX, TypeSignature::UIntType, TypeSignature::IntType, ], - BUFF_32.clone(), + TypeSignature::BUFFER_32, ))), Sha512Trunc256 => Simple(SimpleNativeFunction(FunctionType::UnionArgs( vec![ - TypeSignature::max_buffer()?, + TypeSignature::BUFFER_MAX, TypeSignature::UIntType, 
TypeSignature::IntType, ], - BUFF_32.clone(), + TypeSignature::BUFFER_32, ))), Sha512 => Simple(SimpleNativeFunction(FunctionType::UnionArgs( vec![ - TypeSignature::max_buffer()?, + TypeSignature::BUFFER_MAX, TypeSignature::UIntType, TypeSignature::IntType, ], - BUFF_64.clone(), + TypeSignature::BUFFER_64, ))), Keccak256 => Simple(SimpleNativeFunction(FunctionType::UnionArgs( vec![ - TypeSignature::max_buffer()?, + TypeSignature::BUFFER_MAX, TypeSignature::UIntType, TypeSignature::IntType, ], - BUFF_32.clone(), + TypeSignature::BUFFER_32, ))), Secp256k1Recover => Special(SpecialNativeFunction(&check_secp256k1_recover)), Secp256k1Verify => Special(SpecialNativeFunction(&check_secp256k1_verify)), diff --git a/clarity/src/vm/analysis/type_checker/v2_05/natives/sequences.rs b/clarity/src/vm/analysis/type_checker/v2_05/natives/sequences.rs index 44878db720f..8b03807c7a9 100644 --- a/clarity/src/vm/analysis/type_checker/v2_05/natives/sequences.rs +++ b/clarity/src/vm/analysis/type_checker/v2_05/natives/sequences.rs @@ -25,7 +25,7 @@ use crate::vm::costs::cost_functions::ClarityCostFunction; use crate::vm::costs::{analysis_typecheck_cost, runtime_cost}; use crate::vm::functions::NativeFunctions; use crate::vm::representations::{SymbolicExpression, SymbolicExpressionType}; -pub use crate::vm::types::signatures::{BufferLength, ListTypeData, StringUTF8Length, BUFF_1}; +pub use crate::vm::types::signatures::{BufferLength, ListTypeData, StringUTF8Length}; use crate::vm::types::SequenceSubtype::*; use crate::vm::types::StringSubtype::*; use crate::vm::types::{FunctionType, TypeSignature, Value}; @@ -84,12 +84,12 @@ pub fn check_special_map( TypeSignature::SequenceType(sequence) => { let (entry_type, len) = match sequence { ListType(list_data) => list_data.destruct(), - BufferType(buffer_data) => (TypeSignature::min_buffer()?, buffer_data.into()), + BufferType(buffer_data) => (TypeSignature::BUFFER_MIN, buffer_data.into()), StringType(ASCII(ascii_data)) => { - (TypeSignature::min_string_ascii()?, ascii_data.into()) + (TypeSignature::STRING_ASCII_MIN, ascii_data.into()) } StringType(UTF8(utf8_data)) => { - (TypeSignature::min_string_utf8()?, utf8_data.into()) + (TypeSignature::STRING_UTF8_MIN, utf8_data.into()) } }; min_args = min_args.min(len); @@ -132,7 +132,7 @@ pub fn check_special_filter( { let input_type = match argument_type { - TypeSignature::SequenceType(ref sequence_type) => Ok(sequence_type.unit_type()?), + TypeSignature::SequenceType(ref sequence_type) => Ok(sequence_type.unit_type()), _ => Err(CheckErrorKind::ExpectedSequence(Box::new( argument_type.clone(), ))), @@ -175,7 +175,7 @@ pub fn check_special_fold( let argument_type = checker.type_check(&args[1], context)?; let input_type = match argument_type { - TypeSignature::SequenceType(sequence_type) => Ok(sequence_type.unit_type()?), + TypeSignature::SequenceType(sequence_type) => Ok(sequence_type.unit_type()), _ => Err(CheckErrorKind::ExpectedSequence(Box::new(argument_type))), }?; @@ -394,9 +394,9 @@ pub fn check_special_element_at( let (entry_type, _) = list.destruct(); TypeSignature::new_option(entry_type).map_err(|e| e.into()) } - TypeSignature::SequenceType(BufferType(_)) => { - Ok(TypeSignature::OptionalType(Box::new(BUFF_1.clone()))) - } + TypeSignature::SequenceType(BufferType(_)) => Ok(TypeSignature::OptionalType(Box::new( + TypeSignature::BUFFER_1, + ))), TypeSignature::SequenceType(StringType(ASCII(_))) => Ok(TypeSignature::OptionalType( Box::new(TypeSignature::SequenceType(StringType(ASCII( BufferLength::try_from(1u32) @@ 
-424,7 +424,7 @@ pub fn check_special_index_of( let list_type = checker.type_check(&args[0], context)?; let expected_input_type = match list_type { - TypeSignature::SequenceType(ref sequence_type) => Ok(sequence_type.unit_type()?), + TypeSignature::SequenceType(ref sequence_type) => Ok(sequence_type.unit_type()), _ => Err(CheckErrorKind::ExpectedSequence(Box::new(list_type))), }?; diff --git a/clarity/src/vm/analysis/type_checker/v2_05/tests/mod.rs b/clarity/src/vm/analysis/type_checker/v2_05/tests/mod.rs index ffb6cc9d25b..db7cdf7810f 100644 --- a/clarity/src/vm/analysis/type_checker/v2_05/tests/mod.rs +++ b/clarity/src/vm/analysis/type_checker/v2_05/tests/mod.rs @@ -25,7 +25,6 @@ use crate::vm::types::StringSubtype::*; use crate::vm::types::TypeSignature::{BoolType, IntType, PrincipalType, UIntType}; use crate::vm::types::{ FixedFunction, FunctionType, QualifiedContractIdentifier, TypeSignature, TypeSignatureExt as _, - BUFF_32, BUFF_64, }; use crate::vm::ClarityVersion; mod assets; @@ -460,7 +459,10 @@ fn test_at_block() { let bad = [ ( "(at-block (sha512 u0) u1)", - CheckErrorKind::TypeError(Box::new(BUFF_32.clone()), Box::new(BUFF_64.clone())), + CheckErrorKind::TypeError( + Box::new(TypeSignature::BUFFER_32), + Box::new(TypeSignature::BUFFER_64), + ), ), ( "(at-block (sha256 u0) u1 u2)", @@ -725,16 +727,16 @@ fn test_index_of() { Box::new(TypeSignature::UIntType), ), CheckErrorKind::TypeError( - Box::new(TypeSignature::min_buffer().unwrap()), - Box::new(TypeSignature::min_string_ascii().unwrap()), + Box::new(TypeSignature::BUFFER_MIN), + Box::new(TypeSignature::STRING_ASCII_MIN), ), CheckErrorKind::TypeError( - Box::new(TypeSignature::min_string_utf8().unwrap()), - Box::new(TypeSignature::min_string_ascii().unwrap()), + Box::new(TypeSignature::STRING_UTF8_MIN), + Box::new(TypeSignature::STRING_ASCII_MIN), ), CheckErrorKind::TypeError( - Box::new(TypeSignature::min_string_ascii().unwrap()), - Box::new(TypeSignature::min_string_utf8().unwrap()), + Box::new(TypeSignature::STRING_ASCII_MIN), + Box::new(TypeSignature::STRING_UTF8_MIN), ), CheckErrorKind::CouldNotDetermineType, CheckErrorKind::CouldNotDetermineType, diff --git a/clarity/src/vm/analysis/type_checker/v2_1/mod.rs b/clarity/src/vm/analysis/type_checker/v2_1/mod.rs index d70e5af87b5..aac62fb73ad 100644 --- a/clarity/src/vm/analysis/type_checker/v2_1/mod.rs +++ b/clarity/src/vm/analysis/type_checker/v2_1/mod.rs @@ -410,9 +410,9 @@ impl FunctionType { vec![ TypeSignature::IntType, TypeSignature::UIntType, - TypeSignature::max_string_ascii()?, - TypeSignature::max_string_utf8()?, - TypeSignature::max_buffer()?, + TypeSignature::STRING_ASCII_MAX, + TypeSignature::STRING_UTF8_MAX, + TypeSignature::BUFFER_MAX, ], Box::new(first.clone()), ) diff --git a/clarity/src/vm/analysis/type_checker/v2_1/natives/conversions.rs b/clarity/src/vm/analysis/type_checker/v2_1/natives/conversions.rs index f41702d56c7..b918c66da48 100644 --- a/clarity/src/vm/analysis/type_checker/v2_1/natives/conversions.rs +++ b/clarity/src/vm/analysis/type_checker/v2_1/natives/conversions.rs @@ -38,6 +38,6 @@ pub fn check_special_from_consensus_buff( ) -> Result { check_argument_count(2, args)?; let result_type = TypeSignature::parse_type_repr(StacksEpochId::Epoch21, &args[0], checker)?; - checker.type_check_expects(&args[1], context, &TypeSignature::max_buffer()?)?; + checker.type_check_expects(&args[1], context, &TypeSignature::BUFFER_MAX)?; TypeSignature::new_option(result_type).map_err(StaticCheckError::from) } diff --git 
a/clarity/src/vm/analysis/type_checker/v2_1/natives/mod.rs b/clarity/src/vm/analysis/type_checker/v2_1/natives/mod.rs index ee68c359c7b..d91f0d7e781 100644 --- a/clarity/src/vm/analysis/type_checker/v2_1/natives/mod.rs +++ b/clarity/src/vm/analysis/type_checker/v2_1/natives/mod.rs @@ -26,14 +26,12 @@ use crate::vm::costs::{analysis_typecheck_cost, runtime_cost, CostErrors, CostTr use crate::vm::diagnostic::DiagnosableError; use crate::vm::functions::{handle_binding_list, NativeFunctions}; use crate::vm::types::signatures::{ - CallableSubtype, FunctionArgSignature, FunctionReturnsSignature, SequenceSubtype, ASCII_40, - TO_ASCII_MAX_BUFF, TO_ASCII_RESPONSE_STRING, UTF8_40, + CallableSubtype, FunctionArgSignature, FunctionReturnsSignature, SequenceSubtype, }; use crate::vm::types::{ BlockInfoProperty, BufferLength, BurnBlockInfoProperty, FixedFunction, FunctionArg, FunctionSignature, FunctionType, PrincipalData, StacksBlockInfoProperty, TenureInfoProperty, - TupleTypeSignature, TypeSignature, Value, BUFF_1, BUFF_20, BUFF_32, BUFF_33, BUFF_64, BUFF_65, - MAX_VALUE_SIZE, + TupleTypeSignature, TypeSignature, Value, MAX_VALUE_SIZE, }; use crate::vm::{ClarityName, ClarityVersion, SymbolicExpression, SymbolicExpressionType}; @@ -130,7 +128,7 @@ fn check_special_at_block( context: &TypingContext, ) -> Result { check_argument_count(2, args)?; - checker.type_check_expects(&args[0], context, &BUFF_32)?; + checker.type_check_expects(&args[0], context, &TypeSignature::BUFFER_32)?; checker.type_check(&args[1], context) } @@ -668,7 +666,7 @@ fn check_principal_of( context: &TypingContext, ) -> Result { check_argument_count(1, args)?; - checker.type_check_expects(&args[0], context, &BUFF_33)?; + checker.type_check_expects(&args[0], context, &TypeSignature::BUFFER_33)?; Ok( TypeSignature::new_response(TypeSignature::PrincipalType, TypeSignature::UIntType) .map_err(|_| CheckErrorKind::Expects("Bad constructor".into()))?, @@ -688,13 +686,13 @@ fn check_principal_construct( ) -> Result { check_arguments_at_least(2, args)?; check_arguments_at_most(3, args)?; - checker.type_check_expects(&args[0], context, &BUFF_1)?; - checker.type_check_expects(&args[1], context, &BUFF_20)?; + checker.type_check_expects(&args[0], context, &TypeSignature::BUFFER_1)?; + checker.type_check_expects(&args[1], context, &TypeSignature::BUFFER_20)?; if args.len() > 2 { checker.type_check_expects( &args[2], context, - &TypeSignature::contract_name_string_ascii_type()?, + &TypeSignature::CONTRACT_NAME_STRING_ASCII_MAX, )?; } Ok(TypeSignature::new_response( @@ -719,10 +717,10 @@ fn check_secp256k1_recover( context: &TypingContext, ) -> Result { check_argument_count(2, args)?; - checker.type_check_expects(&args[0], context, &BUFF_32)?; - checker.type_check_expects(&args[1], context, &BUFF_65)?; + checker.type_check_expects(&args[0], context, &TypeSignature::BUFFER_32)?; + checker.type_check_expects(&args[1], context, &TypeSignature::BUFFER_65)?; Ok( - TypeSignature::new_response(BUFF_33.clone(), TypeSignature::UIntType) + TypeSignature::new_response(TypeSignature::BUFFER_33, TypeSignature::UIntType) .map_err(|_| CheckErrorKind::Expects("Bad constructor".into()))?, ) } @@ -733,9 +731,9 @@ fn check_secp256k1_verify( context: &TypingContext, ) -> Result { check_argument_count(3, args)?; - checker.type_check_expects(&args[0], context, &BUFF_32)?; - checker.type_check_expects(&args[1], context, &BUFF_65)?; - checker.type_check_expects(&args[2], context, &BUFF_33)?; + checker.type_check_expects(&args[0], context, &TypeSignature::BUFFER_32)?; + 
checker.type_check_expects(&args[1], context, &TypeSignature::BUFFER_65)?; + checker.type_check_expects(&args[2], context, &TypeSignature::BUFFER_33)?; Ok(TypeSignature::BoolType) } @@ -946,27 +944,27 @@ impl TypedNativeFunction { } StringToInt => Simple(SimpleNativeFunction(FunctionType::UnionArgs( vec![ - TypeSignature::max_string_ascii()?, - TypeSignature::max_string_utf8()?, + TypeSignature::STRING_ASCII_MAX, + TypeSignature::STRING_UTF8_MAX, ], TypeSignature::OptionalType(Box::new(TypeSignature::IntType)), ))), StringToUInt => Simple(SimpleNativeFunction(FunctionType::UnionArgs( vec![ - TypeSignature::max_string_ascii()?, - TypeSignature::max_string_utf8()?, + TypeSignature::STRING_ASCII_MAX, + TypeSignature::STRING_UTF8_MAX, ], TypeSignature::OptionalType(Box::new(TypeSignature::UIntType)), ))), IntToAscii => Simple(SimpleNativeFunction(FunctionType::UnionArgs( vec![TypeSignature::IntType, TypeSignature::UIntType], // 40 is the longest string one can get from int->string conversion. - ASCII_40, + TypeSignature::STRING_ASCII_40, ))), IntToUtf8 => Simple(SimpleNativeFunction(FunctionType::UnionArgs( vec![TypeSignature::IntType, TypeSignature::UIntType], // 40 is the longest string one can get from int->string conversion. - UTF8_40, + TypeSignature::STRING_UTF8_40, ))), Not => Simple(SimpleNativeFunction(FunctionType::Fixed(FixedFunction { args: vec![FunctionArg::new( @@ -981,43 +979,43 @@ impl TypedNativeFunction { }))), Hash160 => Simple(SimpleNativeFunction(FunctionType::UnionArgs( vec![ - TypeSignature::max_buffer()?, + TypeSignature::BUFFER_MAX, TypeSignature::UIntType, TypeSignature::IntType, ], - BUFF_20.clone(), + TypeSignature::BUFFER_20, ))), Sha256 => Simple(SimpleNativeFunction(FunctionType::UnionArgs( vec![ - TypeSignature::max_buffer()?, + TypeSignature::BUFFER_MAX, TypeSignature::UIntType, TypeSignature::IntType, ], - BUFF_32.clone(), + TypeSignature::BUFFER_32, ))), Sha512Trunc256 => Simple(SimpleNativeFunction(FunctionType::UnionArgs( vec![ - TypeSignature::max_buffer()?, + TypeSignature::BUFFER_MAX, TypeSignature::UIntType, TypeSignature::IntType, ], - BUFF_32.clone(), + TypeSignature::BUFFER_32, ))), Sha512 => Simple(SimpleNativeFunction(FunctionType::UnionArgs( vec![ - TypeSignature::max_buffer()?, + TypeSignature::BUFFER_MAX, TypeSignature::UIntType, TypeSignature::IntType, ], - BUFF_64.clone(), + TypeSignature::BUFFER_64, ))), Keccak256 => Simple(SimpleNativeFunction(FunctionType::UnionArgs( vec![ - TypeSignature::max_buffer()?, + TypeSignature::BUFFER_MAX, TypeSignature::UIntType, TypeSignature::IntType, ], - BUFF_32.clone(), + TypeSignature::BUFFER_32, ))), Secp256k1Recover => Special(SpecialNativeFunction(&check_secp256k1_recover)), Secp256k1Verify => Special(SpecialNativeFunction(&check_secp256k1_verify)), @@ -1047,12 +1045,12 @@ impl TypedNativeFunction { /// and error types are the same. 
fn parse_principal_basic_type() -> Result { TupleTypeSignature::try_from(vec![ - ("version".into(), BUFF_1.clone()), - ("hash-bytes".into(), BUFF_20.clone()), + ("version".into(), TypeSignature::BUFFER_1), + ("hash-bytes".into(), TypeSignature::BUFFER_20), ( "name".into(), TypeSignature::new_option( - TypeSignature::contract_name_string_ascii_type()?, + TypeSignature::CONTRACT_NAME_STRING_ASCII_MAX, ) .map_err(|_| CheckErrorKind::Expects("Bad constructor".into()))?, ), @@ -1202,8 +1200,11 @@ impl TypedNativeFunction { ) })?, )], - returns: TypeSignature::new_response(BUFF_32.clone(), TypeSignature::UIntType) - .map_err(|_| CheckErrorKind::Expects("Bad constructor".into()))?, + returns: TypeSignature::new_response( + TypeSignature::BUFFER_32, + TypeSignature::UIntType, + ) + .map_err(|_| CheckErrorKind::Expects("Bad constructor".into()))?, }))), ToAscii => Simple(SimpleNativeFunction(FunctionType::UnionArgs( vec![ @@ -1211,11 +1212,11 @@ impl TypedNativeFunction { TypeSignature::UIntType, TypeSignature::BoolType, TypeSignature::PrincipalType, - TO_ASCII_MAX_BUFF.clone(), - TypeSignature::max_string_utf8()?, + TypeSignature::TO_ASCII_BUFFER_MAX, + TypeSignature::STRING_UTF8_MAX, ], TypeSignature::new_response( - TO_ASCII_RESPONSE_STRING.clone(), + TypeSignature::TO_ASCII_STRING_ASCII_MAX, TypeSignature::UIntType, ) .map_err(|_| { diff --git a/clarity/src/vm/analysis/type_checker/v2_1/natives/sequences.rs b/clarity/src/vm/analysis/type_checker/v2_1/natives/sequences.rs index bcf9354b667..c2e162dae25 100644 --- a/clarity/src/vm/analysis/type_checker/v2_1/natives/sequences.rs +++ b/clarity/src/vm/analysis/type_checker/v2_1/natives/sequences.rs @@ -26,7 +26,7 @@ use crate::vm::costs::{analysis_typecheck_cost, runtime_cost, CostTracker}; use crate::vm::diagnostic::Diagnostic; use crate::vm::functions::NativeFunctions; use crate::vm::representations::{SymbolicExpression, SymbolicExpressionType}; -pub use crate::vm::types::signatures::{BufferLength, ListTypeData, StringUTF8Length, BUFF_1}; +pub use crate::vm::types::signatures::{BufferLength, ListTypeData, StringUTF8Length}; use crate::vm::types::SequenceSubtype::*; use crate::vm::types::StringSubtype::*; use crate::vm::types::{FunctionType, TypeSignature, Value}; @@ -90,12 +90,12 @@ pub fn check_special_map( TypeSignature::SequenceType(sequence) => { let (entry_type, len) = match sequence { ListType(list_data) => list_data.destruct(), - BufferType(buffer_data) => (TypeSignature::min_buffer()?, buffer_data.into()), + BufferType(buffer_data) => (TypeSignature::BUFFER_MIN, buffer_data.into()), StringType(ASCII(ascii_data)) => { - (TypeSignature::min_string_ascii()?, ascii_data.into()) + (TypeSignature::STRING_ASCII_MIN, ascii_data.into()) } StringType(UTF8(utf8_data)) => { - (TypeSignature::min_string_utf8()?, utf8_data.into()) + (TypeSignature::STRING_UTF8_MIN, utf8_data.into()) } }; min_args = min_args.min(len); @@ -181,7 +181,7 @@ pub fn check_special_filter( { let input_type = match argument_type { - TypeSignature::SequenceType(ref sequence_type) => Ok(sequence_type.unit_type()?), + TypeSignature::SequenceType(ref sequence_type) => Ok(sequence_type.unit_type()), _ => Err(CheckErrorKind::ExpectedSequence(Box::new( argument_type.clone(), ))), @@ -224,7 +224,7 @@ pub fn check_special_fold( let argument_type = checker.type_check(&args[1], context)?; let input_type = match argument_type { - TypeSignature::SequenceType(sequence_type) => Ok(sequence_type.unit_type()?), + TypeSignature::SequenceType(sequence_type) => Ok(sequence_type.unit_type()), _ => 
Err(CheckErrorKind::ExpectedSequence(Box::new(argument_type))), }?; @@ -443,9 +443,9 @@ pub fn check_special_element_at( let (entry_type, _) = list.destruct(); TypeSignature::new_option(entry_type).map_err(|e| e.into()) } - TypeSignature::SequenceType(BufferType(_)) => { - Ok(TypeSignature::OptionalType(Box::new(BUFF_1.clone()))) - } + TypeSignature::SequenceType(BufferType(_)) => Ok(TypeSignature::OptionalType(Box::new( + TypeSignature::BUFFER_1, + ))), TypeSignature::SequenceType(StringType(ASCII(_))) => Ok(TypeSignature::OptionalType( Box::new(TypeSignature::SequenceType(StringType(ASCII( BufferLength::try_from(1u32) @@ -473,7 +473,7 @@ pub fn check_special_index_of( let list_type = checker.type_check(&args[0], context)?; let expected_input_type = match list_type { - TypeSignature::SequenceType(ref sequence_type) => Ok(sequence_type.unit_type()?), + TypeSignature::SequenceType(ref sequence_type) => Ok(sequence_type.unit_type()), _ => Err(CheckErrorKind::ExpectedSequence(Box::new(list_type))), }?; @@ -523,7 +523,7 @@ pub fn check_special_replace_at( TypeSignature::SequenceType(seq) => seq, _ => return Err(CheckErrorKind::ExpectedSequence(Box::new(input_type)).into()), }; - let unit_seq = seq_type.unit_type()?; + let unit_seq = seq_type.unit_type(); // Check index argument checker.type_check_expects(&args[1], context, &TypeSignature::UIntType)?; // Check element argument diff --git a/clarity/src/vm/analysis/type_checker/v2_1/tests/contracts.rs b/clarity/src/vm/analysis/type_checker/v2_1/tests/contracts.rs index 696ce27a3e0..18d3ae19d52 100644 --- a/clarity/src/vm/analysis/type_checker/v2_1/tests/contracts.rs +++ b/clarity/src/vm/analysis/type_checker/v2_1/tests/contracts.rs @@ -31,7 +31,7 @@ use crate::vm::ast::parse; use crate::vm::costs::LimitedCostTracker; use crate::vm::database::MemoryBackingStore; use crate::vm::tests::test_clarity_versions; -use crate::vm::types::signatures::{CallableSubtype, TO_ASCII_MAX_BUFF, TO_ASCII_RESPONSE_STRING}; +use crate::vm::types::signatures::CallableSubtype; use crate::vm::types::{ BufferLength, ListTypeData, QualifiedContractIdentifier, SequenceSubtype, StringSubtype, StringUTF8Length, TypeSignature, @@ -3511,16 +3511,19 @@ fn test_contract_hash(#[case] version: ClarityVersion, #[case] epoch: StacksEpoc #[apply(test_clarity_versions)] fn test_to_ascii(#[case] version: ClarityVersion, #[case] epoch: StacksEpochId) { let to_ascii_response_type = Some( - TypeSignature::new_response(TO_ASCII_RESPONSE_STRING.clone(), TypeSignature::UIntType) - .unwrap(), + TypeSignature::new_response( + TypeSignature::TO_ASCII_STRING_ASCII_MAX, + TypeSignature::UIntType, + ) + .unwrap(), ); let to_ascii_expected_types = vec![ TypeSignature::IntType, TypeSignature::UIntType, TypeSignature::BoolType, TypeSignature::PrincipalType, - TO_ASCII_MAX_BUFF.clone(), - TypeSignature::max_string_utf8().unwrap(), + TypeSignature::TO_ASCII_BUFFER_MAX, + TypeSignature::STRING_UTF8_MAX, ]; let test_cases = [ ( @@ -3553,6 +3556,11 @@ fn test_to_ascii(#[case] version: ClarityVersion, #[case] epoch: StacksEpochId) "buffer type", Ok(to_ascii_response_type.clone()), ), + ( + &format!("(to-ascii? 0x{})", "ff".repeat(524284)), + "max len buffer type", + Ok(to_ascii_response_type.clone()), + ), ( &format!("(to-ascii? 
0x{})", "ff".repeat(524285)), "oversized buffer type", diff --git a/clarity/src/vm/analysis/type_checker/v2_1/tests/mod.rs b/clarity/src/vm/analysis/type_checker/v2_1/tests/mod.rs index 0329605ee7c..fb5004ca2c2 100644 --- a/clarity/src/vm/analysis/type_checker/v2_1/tests/mod.rs +++ b/clarity/src/vm/analysis/type_checker/v2_1/tests/mod.rs @@ -14,6 +14,7 @@ // You should have received a copy of the GNU General Public License // along with this program. If not, see . +use clarity_types::types::SequenceSubtype; #[cfg(test)] use rstest::rstest; #[cfg(test)] @@ -33,7 +34,7 @@ use crate::vm::types::StringSubtype::*; use crate::vm::types::TypeSignature::{BoolType, IntType, PrincipalType, SequenceType, UIntType}; use crate::vm::types::{ BufferLength, FixedFunction, FunctionType, QualifiedContractIdentifier, TraitIdentifier, - TypeSignature, TypeSignatureExt as _, BUFF_1, BUFF_20, BUFF_21, BUFF_32, BUFF_64, + TypeSignature, TypeSignatureExt as _, }; use crate::vm::{execute_v2, ClarityName, ClarityVersion}; @@ -102,7 +103,7 @@ fn test_from_consensus_buff() { ( "(from-consensus-buff? int u6)", CheckErrorKind::TypeError( - Box::new(TypeSignature::max_buffer().unwrap()), + Box::new(TypeSignature::BUFFER_MAX), Box::new(TypeSignature::UIntType), ), ), @@ -758,7 +759,10 @@ fn test_at_block() { let bad = [ ( "(at-block (sha512 u0) u1)", - CheckErrorKind::TypeError(Box::new(BUFF_32.clone()), Box::new(BUFF_64.clone())), + CheckErrorKind::TypeError( + Box::new(TypeSignature::BUFFER_32), + Box::new(TypeSignature::BUFFER_64), + ), ), ( "(at-block (sha256 u0) u1 u2)", @@ -1097,16 +1101,16 @@ fn test_index_of() { Box::new(TypeSignature::UIntType), ), CheckErrorKind::TypeError( - Box::new(TypeSignature::min_buffer().unwrap()), - Box::new(TypeSignature::min_string_ascii().unwrap()), + Box::new(TypeSignature::BUFFER_MIN), + Box::new(TypeSignature::STRING_ASCII_MIN), ), CheckErrorKind::TypeError( - Box::new(TypeSignature::min_string_utf8().unwrap()), - Box::new(TypeSignature::min_string_ascii().unwrap()), + Box::new(TypeSignature::STRING_UTF8_MIN), + Box::new(TypeSignature::STRING_ASCII_MIN), ), CheckErrorKind::TypeError( - Box::new(TypeSignature::min_string_ascii().unwrap()), - Box::new(TypeSignature::min_string_utf8().unwrap()), + Box::new(TypeSignature::STRING_ASCII_MIN), + Box::new(TypeSignature::STRING_UTF8_MIN), ), CheckErrorKind::TypeError( Box::new(TypeSignature::list_of(TypeSignature::IntType, 1).unwrap()), @@ -1118,16 +1122,16 @@ fn test_index_of() { Box::new(TypeSignature::UIntType), ), CheckErrorKind::TypeError( - Box::new(TypeSignature::min_buffer().unwrap()), - Box::new(TypeSignature::min_string_ascii().unwrap()), + Box::new(TypeSignature::BUFFER_MIN), + Box::new(TypeSignature::STRING_ASCII_MIN), ), CheckErrorKind::TypeError( - Box::new(TypeSignature::min_string_utf8().unwrap()), - Box::new(TypeSignature::min_string_ascii().unwrap()), + Box::new(TypeSignature::STRING_UTF8_MIN), + Box::new(TypeSignature::STRING_ASCII_MIN), ), CheckErrorKind::TypeError( - Box::new(TypeSignature::min_string_ascii().unwrap()), - Box::new(TypeSignature::min_string_utf8().unwrap()), + Box::new(TypeSignature::STRING_ASCII_MIN), + Box::new(TypeSignature::STRING_UTF8_MIN), ), CheckErrorKind::CouldNotDetermineType, CheckErrorKind::CouldNotDetermineType, @@ -2239,8 +2243,8 @@ fn test_string_to_ints() { CheckErrorKind::IncorrectArgumentCount(1, 0), CheckErrorKind::UnionTypeError( vec![ - TypeSignature::max_string_ascii().unwrap(), - TypeSignature::max_string_utf8().unwrap(), + TypeSignature::STRING_ASCII_MAX, + 
TypeSignature::STRING_UTF8_MAX, ], Box::new(SequenceType(BufferType( BufferLength::try_from(17_u32).unwrap(), @@ -2248,8 +2252,8 @@ fn test_string_to_ints() { ), CheckErrorKind::UnionTypeError( vec![ - TypeSignature::max_string_ascii().unwrap(), - TypeSignature::max_string_utf8().unwrap(), + TypeSignature::STRING_ASCII_MAX, + TypeSignature::STRING_UTF8_MAX, ], Box::new(IntType), ), @@ -2257,8 +2261,8 @@ fn test_string_to_ints() { CheckErrorKind::IncorrectArgumentCount(1, 0), CheckErrorKind::UnionTypeError( vec![ - TypeSignature::max_string_ascii().unwrap(), - TypeSignature::max_string_utf8().unwrap(), + TypeSignature::STRING_ASCII_MAX, + TypeSignature::STRING_UTF8_MAX, ], Box::new(SequenceType(BufferType( BufferLength::try_from(17_u32).unwrap(), @@ -2266,8 +2270,8 @@ fn test_string_to_ints() { ), CheckErrorKind::UnionTypeError( vec![ - TypeSignature::max_string_ascii().unwrap(), - TypeSignature::max_string_utf8().unwrap(), + TypeSignature::STRING_ASCII_MAX, + TypeSignature::STRING_UTF8_MAX, ], Box::new(IntType), ), @@ -3358,7 +3362,7 @@ fn test_principal_destruct() { CheckErrorKind::IncorrectArgumentCount(1, 0), CheckErrorKind::TypeError( Box::new(TypeSignature::PrincipalType), - Box::new(BUFF_1.clone()), + Box::new(TypeSignature::BUFFER_1), ), ]; @@ -3420,31 +3424,39 @@ fn test_principal_construct() { // The first buffer is too long, should be `(buff 1)`. ( r#"(principal-construct? 0xfa6bf38ed557fe417333710d6033e9419391a320 0xfa6bf38ed557fe417333710d6033e9419391a320)"#, - CheckErrorKind::TypeError(Box::new(BUFF_1.clone()), Box::new(BUFF_20.clone())), + CheckErrorKind::TypeError( + Box::new(TypeSignature::BUFFER_1), + Box::new(TypeSignature::BUFFER_20), + ), ), // The second buffer is too long, should be `(buff 20)`. ( r#"(principal-construct? 0x22 0xfa6bf38ed557fe417333710d6033e9419391a32009)"#, - CheckErrorKind::TypeError(Box::new(BUFF_20.clone()), Box::new(BUFF_21.clone())), + CheckErrorKind::TypeError( + Box::new(TypeSignature::BUFFER_20), + Box::new(TypeSignature::SequenceType(SequenceSubtype::BufferType( + 21_u32.try_into().unwrap(), + ))), + ), ), // `int` argument instead of `(buff 1)` for version. ( r#"(principal-construct? 22 0xfa6bf38ed557fe417333710d6033e9419391a320)"#, - CheckErrorKind::TypeError(Box::new(BUFF_1.clone()), Box::new(IntType.clone())), + CheckErrorKind::TypeError(Box::new(TypeSignature::BUFFER_1), Box::new(IntType.clone())), ), // `name` argument is too long ( r#"(principal-construct? 0x22 0xfa6bf38ed557fe417333710d6033e9419391a320 "foooooooooooooooooooooooooooooooooooooooo")"#, CheckErrorKind::TypeError( - Box::new(TypeSignature::contract_name_string_ascii_type().unwrap()), - Box::new(TypeSignature::bound_string_ascii_type(41).unwrap()), + Box::new(TypeSignature::CONTRACT_NAME_STRING_ASCII_MAX), + Box::new(SequenceType(StringType(ASCII(41_u32.try_into().unwrap())))), ), ), // bad argument type for `name` ( r#"(principal-construct? 
0x22 0xfa6bf38ed557fe417333710d6033e9419391a320 u123)"#, CheckErrorKind::TypeError( - Box::new(TypeSignature::contract_name_string_ascii_type().unwrap()), + Box::new(TypeSignature::CONTRACT_NAME_STRING_ASCII_MAX), Box::new(UIntType), ), ), diff --git a/clarity/src/vm/docs/mod.rs b/clarity/src/vm/docs/mod.rs index df84474a394..148247e59a6 100644 --- a/clarity/src/vm/docs/mod.rs +++ b/clarity/src/vm/docs/mod.rs @@ -2806,7 +2806,7 @@ mod test { BurnStateDB, ClarityDatabase, HeadersDB, MemoryBackingStore, STXBalance, }; use crate::vm::docs::get_output_type_string; - use crate::vm::types::signatures::{FunctionArgSignature, FunctionReturnsSignature, ASCII_40}; + use crate::vm::types::signatures::{FunctionArgSignature, FunctionReturnsSignature}; use crate::vm::types::{ FunctionType, PrincipalData, QualifiedContractIdentifier, TupleData, TypeSignature, }; @@ -3330,7 +3330,10 @@ mod test { function_type = FunctionType::Binary( FunctionArgSignature::Single(TypeSignature::IntType), - FunctionArgSignature::Union(vec![ASCII_40, TypeSignature::IntType]), + FunctionArgSignature::Union(vec![ + TypeSignature::STRING_ASCII_40, + TypeSignature::IntType, + ]), ret.clone(), ); result = get_input_type_string(&function_type); @@ -3474,7 +3477,7 @@ mod test { TypeSignature::IntType, TypeSignature::UIntType, TypeSignature::PrincipalType, - ASCII_40, + TypeSignature::STRING_ASCII_40, ]), FunctionReturnsSignature::TypeOfArgAtPosition(1), ); diff --git a/clarity/src/vm/functions/arithmetic.rs b/clarity/src/vm/functions/arithmetic.rs index 5b40fc1e30d..8682e217739 100644 --- a/clarity/src/vm/functions/arithmetic.rs +++ b/clarity/src/vm/functions/arithmetic.rs @@ -123,9 +123,9 @@ macro_rules! type_force_binary_comparison_v2 { vec![ TypeSignature::IntType, TypeSignature::UIntType, - TypeSignature::max_string_ascii()?, - TypeSignature::max_string_utf8()?, - TypeSignature::max_buffer()?, + TypeSignature::STRING_ASCII_MAX, + TypeSignature::STRING_UTF8_MAX, + TypeSignature::BUFFER_MAX, ], Box::new(x), ) diff --git a/clarity/src/vm/functions/conversions.rs b/clarity/src/vm/functions/conversions.rs index 5f3e8033629..ee798e5f23c 100644 --- a/clarity/src/vm/functions/conversions.rs +++ b/clarity/src/vm/functions/conversions.rs @@ -22,7 +22,6 @@ use crate::vm::errors::{ check_argument_count, CheckErrorKind, InterpreterResult as Result, VmInternalError, }; use crate::vm::representations::SymbolicExpression; -use crate::vm::types::signatures::TO_ASCII_MAX_BUFF; use crate::vm::types::SequenceSubtype::BufferType; use crate::vm::types::TypeSignature::SequenceType; use crate::vm::types::{ @@ -151,8 +150,8 @@ pub fn native_string_to_int_generic( } _ => Err(CheckErrorKind::UnionTypeValueError( vec![ - TypeSignature::max_string_ascii()?, - TypeSignature::max_string_utf8()?, + TypeSignature::STRING_ASCII_MAX, + TypeSignature::STRING_UTF8_MAX, ], Box::new(value), ) @@ -277,8 +276,8 @@ pub fn special_to_ascii( TypeSignature::UIntType, TypeSignature::BoolType, TypeSignature::PrincipalType, - TO_ASCII_MAX_BUFF.clone(), - TypeSignature::max_string_utf8()?, + TypeSignature::TO_ASCII_BUFFER_MAX, + TypeSignature::STRING_UTF8_MAX, ], Box::new(value), ) @@ -325,7 +324,7 @@ pub fn from_consensus_buff( Ok(buff_data.data) } else { Err(CheckErrorKind::TypeValueError( - Box::new(TypeSignature::max_buffer()?), + Box::new(TypeSignature::BUFFER_MAX), Box::new(value), )) }?; diff --git a/clarity/src/vm/functions/crypto.rs b/clarity/src/vm/functions/crypto.rs index d52ff19dded..f0159db8c2c 100644 --- a/clarity/src/vm/functions/crypto.rs +++ 
b/clarity/src/vm/functions/crypto.rs @@ -27,7 +27,7 @@ use crate::vm::errors::{ check_argument_count, CheckErrorKind, InterpreterResult as Result, VmInternalError, }; use crate::vm::representations::SymbolicExpression; -use crate::vm::types::{BuffData, SequenceData, TypeSignature, Value, BUFF_32, BUFF_33, BUFF_65}; +use crate::vm::types::{BuffData, SequenceData, TypeSignature, Value}; use crate::vm::{eval, ClarityVersion, Environment, LocalContext}; macro_rules! native_hash_func { @@ -41,7 +41,7 @@ macro_rules! native_hash_func { vec![ TypeSignature::IntType, TypeSignature::UIntType, - TypeSignature::max_buffer()?, + TypeSignature::BUFFER_MAX, ], Box::new(input), )), @@ -103,7 +103,7 @@ pub fn special_principal_of( Value::Sequence(SequenceData::Buffer(BuffData { ref data })) => { if data.len() != 33 { return Err(CheckErrorKind::TypeValueError( - Box::new(BUFF_33.clone()), + Box::new(TypeSignature::BUFFER_33), Box::new(param0), ) .into()); @@ -111,9 +111,11 @@ pub fn special_principal_of( data } _ => { - return Err( - CheckErrorKind::TypeValueError(Box::new(BUFF_33.clone()), Box::new(param0)).into(), + return Err(CheckErrorKind::TypeValueError( + Box::new(TypeSignature::BUFFER_33), + Box::new(param0), ) + .into()) } }; @@ -149,7 +151,7 @@ pub fn special_secp256k1_recover( Value::Sequence(SequenceData::Buffer(BuffData { ref data })) => { if data.len() != 32 { return Err(CheckErrorKind::TypeValueError( - Box::new(BUFF_32.clone()), + Box::new(TypeSignature::BUFFER_32), Box::new(param0), ) .into()); @@ -157,9 +159,11 @@ pub fn special_secp256k1_recover( data } _ => { - return Err( - CheckErrorKind::TypeValueError(Box::new(BUFF_32.clone()), Box::new(param0)).into(), + return Err(CheckErrorKind::TypeValueError( + Box::new(TypeSignature::BUFFER_32), + Box::new(param0), ) + .into()) } }; @@ -168,7 +172,7 @@ pub fn special_secp256k1_recover( Value::Sequence(SequenceData::Buffer(BuffData { ref data })) => { if data.len() > 65 { return Err(CheckErrorKind::TypeValueError( - Box::new(BUFF_65.clone()), + Box::new(TypeSignature::BUFFER_65), Box::new(param1), ) .into()); @@ -179,9 +183,11 @@ pub fn special_secp256k1_recover( data } _ => { - return Err( - CheckErrorKind::TypeValueError(Box::new(BUFF_65.clone()), Box::new(param1)).into(), + return Err(CheckErrorKind::TypeValueError( + Box::new(TypeSignature::BUFFER_65), + Box::new(param1), ) + .into()) } }; @@ -213,7 +219,7 @@ pub fn special_secp256k1_verify( Value::Sequence(SequenceData::Buffer(BuffData { ref data })) => { if data.len() != 32 { return Err(CheckErrorKind::TypeValueError( - Box::new(BUFF_32.clone()), + Box::new(TypeSignature::BUFFER_32), Box::new(param0), ) .into()); @@ -221,9 +227,11 @@ pub fn special_secp256k1_verify( data } _ => { - return Err( - CheckErrorKind::TypeValueError(Box::new(BUFF_32.clone()), Box::new(param0)).into(), + return Err(CheckErrorKind::TypeValueError( + Box::new(TypeSignature::BUFFER_32), + Box::new(param0), ) + .into()) } }; @@ -232,7 +240,7 @@ pub fn special_secp256k1_verify( Value::Sequence(SequenceData::Buffer(BuffData { ref data })) => { if data.len() > 65 { return Err(CheckErrorKind::TypeValueError( - Box::new(BUFF_65.clone()), + Box::new(TypeSignature::BUFFER_65), Box::new(param1), ) .into()); @@ -246,9 +254,11 @@ pub fn special_secp256k1_verify( data } _ => { - return Err( - CheckErrorKind::TypeValueError(Box::new(BUFF_65.clone()), Box::new(param1)).into(), + return Err(CheckErrorKind::TypeValueError( + Box::new(TypeSignature::BUFFER_65), + Box::new(param1), ) + .into()) } }; @@ -257,7 +267,7 @@ pub fn 
special_secp256k1_verify( Value::Sequence(SequenceData::Buffer(BuffData { ref data })) => { if data.len() != 33 { return Err(CheckErrorKind::TypeValueError( - Box::new(BUFF_33.clone()), + Box::new(TypeSignature::BUFFER_33), Box::new(param2), ) .into()); @@ -265,9 +275,11 @@ pub fn special_secp256k1_verify( data } _ => { - return Err( - CheckErrorKind::TypeValueError(Box::new(BUFF_33.clone()), Box::new(param2)).into(), + return Err(CheckErrorKind::TypeValueError( + Box::new(TypeSignature::BUFFER_33), + Box::new(param2), ) + .into()) } }; diff --git a/clarity/src/vm/functions/database.rs b/clarity/src/vm/functions/database.rs index fc1857e4421..445d45f9759 100644 --- a/clarity/src/vm/functions/database.rs +++ b/clarity/src/vm/functions/database.rs @@ -28,7 +28,7 @@ use crate::vm::errors::{ use crate::vm::representations::{SymbolicExpression, SymbolicExpressionType}; use crate::vm::types::{ BlockInfoProperty, BuffData, BurnBlockInfoProperty, PrincipalData, SequenceData, - StacksBlockInfoProperty, TenureInfoProperty, TupleData, TypeSignature, Value, BUFF_32, + StacksBlockInfoProperty, TenureInfoProperty, TupleData, TypeSignature, Value, }; use crate::vm::{eval, ClarityVersion, Environment, LocalContext}; @@ -457,9 +457,11 @@ pub fn special_at_block( } } x => { - return Err( - CheckErrorKind::TypeValueError(Box::new(BUFF_32.clone()), Box::new(x)).into(), + return Err(CheckErrorKind::TypeValueError( + Box::new(TypeSignature::BUFFER_32), + Box::new(x), ) + .into()) } }; diff --git a/clarity/src/vm/functions/principals.rs b/clarity/src/vm/functions/principals.rs index 55098aa2514..221c66bdb91 100644 --- a/clarity/src/vm/functions/principals.rs +++ b/clarity/src/vm/functions/principals.rs @@ -13,7 +13,6 @@ use crate::vm::errors::{ use crate::vm::representations::{ SymbolicExpression, CONTRACT_MAX_NAME_LENGTH, CONTRACT_MIN_NAME_LENGTH, }; -use crate::vm::types::signatures::{BUFF_1, BUFF_20}; use crate::vm::types::{ ASCIIData, BuffData, CharType, OptionalData, PrincipalData, QualifiedContractIdentifier, ResponseData, SequenceData, StandardPrincipalData, TupleData, TypeSignature, Value, @@ -211,19 +210,22 @@ pub fn special_principal_construct( _ => { return { // This is an aborting error because this should have been caught in analysis pass. - Err( - CheckErrorKind::TypeValueError(Box::new(BUFF_1.clone()), Box::new(version)) - .into(), + Err(CheckErrorKind::TypeValueError( + Box::new(TypeSignature::BUFFER_1), + Box::new(version), ) + .into()) }; } }; let version_byte = if verified_version.len() > 1 { // should have been caught by the type-checker - return Err( - CheckErrorKind::TypeValueError(Box::new(BUFF_1.clone()), Box::new(version)).into(), - ); + return Err(CheckErrorKind::TypeValueError( + Box::new(TypeSignature::BUFFER_1), + Box::new(version), + ) + .into()); } else if verified_version.is_empty() { // the type checker does not check the actual length of the buffer, but a 0-length buffer // will type-check to (buff 1) @@ -248,7 +250,7 @@ pub fn special_principal_construct( Value::Sequence(SequenceData::Buffer(BuffData { ref data })) => data, _ => { return Err(CheckErrorKind::TypeValueError( - Box::new(BUFF_20.clone()), + Box::new(TypeSignature::BUFFER_20), Box::new(hash_bytes), ) .into()) @@ -259,7 +261,7 @@ pub fn special_principal_construct( // This is an aborting error because this should have been caught in analysis pass. 
if verified_hash_bytes.len() > 20 { return Err(CheckErrorKind::TypeValueError( - Box::new(BUFF_20.clone()), + Box::new(TypeSignature::BUFFER_20), Box::new(hash_bytes), ) .into()); @@ -283,7 +285,7 @@ pub fn special_principal_construct( Value::Sequence(SequenceData::String(CharType::ASCII(ascii_data))) => ascii_data, _ => { return Err(CheckErrorKind::TypeValueError( - Box::new(TypeSignature::contract_name_string_ascii_type()?), + Box::new(TypeSignature::CONTRACT_NAME_STRING_ASCII_MAX), Box::new(name), ) .into()) @@ -300,7 +302,7 @@ pub fn special_principal_construct( // if it's too long, then this should have been caught by the type-checker if name_bytes.data.len() > CONTRACT_MAX_NAME_LENGTH { return Err(CheckErrorKind::TypeValueError( - Box::new(TypeSignature::contract_name_string_ascii_type()?), + Box::new(TypeSignature::CONTRACT_NAME_STRING_ASCII_MAX), Box::new(Value::from(name_bytes)), ) .into()); diff --git a/clarity/src/vm/functions/sequences.rs b/clarity/src/vm/functions/sequences.rs index ebc6a6f7dc6..eb66ca82e52 100644 --- a/clarity/src/vm/functions/sequences.rs +++ b/clarity/src/vm/functions/sequences.rs @@ -438,7 +438,7 @@ pub fn special_replace_at( runtime_cost(ClarityCostFunction::ReplaceAt, env, seq_type.size()?)?; let expected_elem_type = if let TypeSignature::SequenceType(seq_subtype) = &seq_type { - seq_subtype.unit_type()? + seq_subtype.unit_type() } else { return Err(CheckErrorKind::ExpectedSequence(Box::new(seq_type)).into()); }; diff --git a/clarity/src/vm/tests/conversions.rs b/clarity/src/vm/tests/conversions.rs index 4f3c8cda57e..fb26f20572a 100644 --- a/clarity/src/vm/tests/conversions.rs +++ b/clarity/src/vm/tests/conversions.rs @@ -14,11 +14,11 @@ // You should have received a copy of the GNU General Public License // along with this program. If not, see . +use clarity_types::types::MAX_TO_ASCII_BUFFER_LEN; use stacks_common::types::StacksEpochId; pub use crate::vm::analysis::errors::CheckErrorKind; use crate::vm::tests::test_clarity_versions; -use crate::vm::types::signatures::MAX_TO_ASCII_BUFFER_LEN; use crate::vm::types::SequenceSubtype::BufferType; use crate::vm::types::TypeSignature::SequenceType; use crate::vm::types::{ @@ -314,8 +314,8 @@ fn test_simple_string_to_int() { execute_v2(wrong_type_error_test).unwrap_err(), CheckErrorKind::UnionTypeValueError( vec![ - TypeSignature::max_string_ascii().unwrap(), - TypeSignature::max_string_utf8().unwrap(), + TypeSignature::STRING_ASCII_MAX, + TypeSignature::STRING_UTF8_MAX, ], Box::new(Value::Int(1)) ) @@ -379,8 +379,8 @@ fn test_simple_string_to_uint() { execute_v2(wrong_type_error_test).unwrap_err(), CheckErrorKind::UnionTypeValueError( vec![ - TypeSignature::max_string_ascii().unwrap(), - TypeSignature::max_string_utf8().unwrap(), + TypeSignature::STRING_ASCII_MAX, + TypeSignature::STRING_UTF8_MAX, ], Box::new(Value::Int(1)) ) diff --git a/clarity/src/vm/tests/principals.rs b/clarity/src/vm/tests/principals.rs index 49a3eaf86f0..6b046250877 100644 --- a/clarity/src/vm/tests/principals.rs +++ b/clarity/src/vm/tests/principals.rs @@ -10,7 +10,7 @@ use crate::vm::{ errors::CheckErrorKind, functions::principals::PrincipalConstructErrorCode, types::TypeSignature::PrincipalType, - types::{ResponseData, TypeSignature, BUFF_1, BUFF_20}, + types::{ResponseData, TypeSignature}, }; use crate::vm::{execute_with_parameters, ClarityVersion}; @@ -906,7 +906,7 @@ fn test_principal_construct_check_errors() { let input = r#"(principal-construct? 
0x590493 0x0102030405060708091011121314151617181920)"#; assert_eq!( Err(CheckErrorKind::TypeValueError( - Box::new(BUFF_1.clone()), + Box::new(TypeSignature::BUFFER_1), Box::new(Value::Sequence(SequenceData::Buffer(BuffData { data: hex_bytes("590493").unwrap() }))), @@ -924,10 +924,11 @@ fn test_principal_construct_check_errors() { // `CheckErrorKind`. let input = r#"(principal-construct? u22 0x0102030405060708091011121314151617181920)"#; assert_eq!( - Err( - CheckErrorKind::TypeValueError(Box::new(BUFF_1.clone()), Box::new(Value::UInt(22)),) - .into() - ), + Err(CheckErrorKind::TypeValueError( + Box::new(TypeSignature::BUFFER_1), + Box::new(Value::UInt(22)), + ) + .into()), execute_with_parameters( input, ClarityVersion::Clarity2, @@ -948,7 +949,7 @@ fn test_principal_construct_check_errors() { ) .unwrap_err(), CheckErrorKind::TypeValueError( - Box::new(BUFF_20.clone()), + Box::new(TypeSignature::BUFFER_20), Box::new(Value::Sequence(SequenceData::Buffer(BuffData { data: hex_bytes("010203040506070809101112131415161718192021").unwrap() }))), @@ -960,7 +961,7 @@ fn test_principal_construct_check_errors() { let input = r#"(principal-construct? 0x16 0x0102030405060708091011121314151617181920 "abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ")"#; assert_eq!( Err(CheckErrorKind::TypeValueError( - Box::new(TypeSignature::contract_name_string_ascii_type().unwrap()), + Box::new(TypeSignature::CONTRACT_NAME_STRING_ASCII_MAX), Box::new(Value::Sequence(SequenceData::String(CharType::ASCII( ASCIIData { data: "abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ" diff --git a/clarity/src/vm/tests/sequences.rs b/clarity/src/vm/tests/sequences.rs index fab241d294c..3fd92b6d055 100644 --- a/clarity/src/vm/tests/sequences.rs +++ b/clarity/src/vm/tests/sequences.rs @@ -114,15 +114,15 @@ fn test_index_of() { let bad_expected = [ CheckErrorKind::ExpectedSequence(Box::new(TypeSignature::IntType)), CheckErrorKind::TypeValueError( - Box::new(TypeSignature::min_buffer().unwrap()), + Box::new(TypeSignature::BUFFER_MIN), Box::new(execute("\"a\"").unwrap().unwrap()), ), CheckErrorKind::TypeValueError( - Box::new(TypeSignature::min_string_utf8().unwrap()), + Box::new(TypeSignature::STRING_UTF8_MIN), Box::new(execute("\"a\"").unwrap().unwrap()), ), CheckErrorKind::TypeValueError( - Box::new(TypeSignature::min_string_ascii().unwrap()), + Box::new(TypeSignature::STRING_ASCII_MIN), Box::new(execute("u\"a\"").unwrap().unwrap()), ), ]; diff --git a/clarity/src/vm/tests/simple_apply_eval.rs b/clarity/src/vm/tests/simple_apply_eval.rs index d798c83baa3..3a67aeae0dd 100644 --- a/clarity/src/vm/tests/simple_apply_eval.rs +++ b/clarity/src/vm/tests/simple_apply_eval.rs @@ -577,14 +577,14 @@ fn test_secp256k1_errors() { ]; let expectations: &[VmExecutionError] = &[ - CheckErrorKind::TypeValueError(Box::new(BUFF_32.clone()), Box::new(Value::Sequence(SequenceData::Buffer(BuffData { data: hex_bytes("de5b9eb9e7c5592930eb2e30a01369c36586d872082ed8181ee83d2a0ec20f").unwrap() })))).into(), - CheckErrorKind::TypeValueError(Box::new(BUFF_65.clone()), Box::new(Value::Sequence(SequenceData::Buffer(BuffData { data: hex_bytes("8738487ebe69b93d8e51583be8eee50bb4213fc49c767d329632730cc193b873554428fc936ca3569afc15f1c9365f6591d6251a89fee9c9ac661116824d3a130100").unwrap() })))).into(), + CheckErrorKind::TypeValueError(Box::new(TypeSignature::BUFFER_32), Box::new(Value::Sequence(SequenceData::Buffer(BuffData { data: hex_bytes("de5b9eb9e7c5592930eb2e30a01369c36586d872082ed8181ee83d2a0ec20f").unwrap() })))).into(), + 
CheckErrorKind::TypeValueError(Box::new(TypeSignature::BUFFER_65), Box::new(Value::Sequence(SequenceData::Buffer(BuffData { data: hex_bytes("8738487ebe69b93d8e51583be8eee50bb4213fc49c767d329632730cc193b873554428fc936ca3569afc15f1c9365f6591d6251a89fee9c9ac661116824d3a130100").unwrap() })))).into(), CheckErrorKind::IncorrectArgumentCount(2, 1).into(), CheckErrorKind::IncorrectArgumentCount(2, 3).into(), - CheckErrorKind::TypeValueError(Box::new(BUFF_32.clone()), Box::new(Value::Sequence(SequenceData::Buffer(BuffData { data: hex_bytes("de5b9eb9e7c5592930eb2e30a01369c36586d872082ed8181ee83d2a0ec20f").unwrap() })))).into(), - CheckErrorKind::TypeValueError(Box::new(BUFF_65.clone()), Box::new(Value::Sequence(SequenceData::Buffer(BuffData { data: hex_bytes("8738487ebe69b93d8e51583be8eee50bb4213fc49c767d329632730cc193b873554428fc936ca3569afc15f1c9365f6591d6251a89fee9c9ac661116824d3a130111").unwrap() })))).into(), - CheckErrorKind::TypeValueError(Box::new(BUFF_33.clone()), Box::new(Value::Sequence(SequenceData::Buffer(BuffData { data: hex_bytes("03adb8de4bfb65db2cfd6120d55c6526ae9c52e675db7e47308636534ba7").unwrap() })))).into(), + CheckErrorKind::TypeValueError(Box::new(TypeSignature::BUFFER_32), Box::new(Value::Sequence(SequenceData::Buffer(BuffData { data: hex_bytes("de5b9eb9e7c5592930eb2e30a01369c36586d872082ed8181ee83d2a0ec20f").unwrap() })))).into(), + CheckErrorKind::TypeValueError(Box::new(TypeSignature::BUFFER_65), Box::new(Value::Sequence(SequenceData::Buffer(BuffData { data: hex_bytes("8738487ebe69b93d8e51583be8eee50bb4213fc49c767d329632730cc193b873554428fc936ca3569afc15f1c9365f6591d6251a89fee9c9ac661116824d3a130111").unwrap() })))).into(), + CheckErrorKind::TypeValueError(Box::new(TypeSignature::BUFFER_33), Box::new(Value::Sequence(SequenceData::Buffer(BuffData { data: hex_bytes("03adb8de4bfb65db2cfd6120d55c6526ae9c52e675db7e47308636534ba7").unwrap() })))).into(), CheckErrorKind::IncorrectArgumentCount(3, 2).into(), CheckErrorKind::IncorrectArgumentCount(1, 2).into(), @@ -1010,9 +1010,9 @@ fn test_sequence_comparisons_mismatched_types() { vec![ TypeSignature::IntType, TypeSignature::UIntType, - TypeSignature::max_string_ascii().unwrap(), - TypeSignature::max_string_utf8().unwrap(), - TypeSignature::max_buffer().unwrap(), + TypeSignature::STRING_ASCII_MAX, + TypeSignature::STRING_UTF8_MAX, + TypeSignature::BUFFER_MAX, ], Box::new(Value::Int(0)), ) @@ -1021,9 +1021,9 @@ fn test_sequence_comparisons_mismatched_types() { vec![ TypeSignature::IntType, TypeSignature::UIntType, - TypeSignature::max_string_ascii().unwrap(), - TypeSignature::max_string_utf8().unwrap(), - TypeSignature::max_buffer().unwrap(), + TypeSignature::STRING_ASCII_MAX, + TypeSignature::STRING_UTF8_MAX, + TypeSignature::BUFFER_MAX, ], Box::new(Value::Int(0)), ) @@ -1044,9 +1044,9 @@ fn test_sequence_comparisons_mismatched_types() { vec![ TypeSignature::IntType, TypeSignature::UIntType, - TypeSignature::max_string_ascii().unwrap(), - TypeSignature::max_string_utf8().unwrap(), - TypeSignature::max_buffer().unwrap(), + TypeSignature::STRING_ASCII_MAX, + TypeSignature::STRING_UTF8_MAX, + TypeSignature::BUFFER_MAX, ], Box::new(Value::Sequence(SequenceData::String(CharType::ASCII( ASCIIData { @@ -1059,9 +1059,9 @@ fn test_sequence_comparisons_mismatched_types() { vec![ TypeSignature::IntType, TypeSignature::UIntType, - TypeSignature::max_string_ascii().unwrap(), - TypeSignature::max_string_utf8().unwrap(), - TypeSignature::max_buffer().unwrap(), + TypeSignature::STRING_ASCII_MAX, + TypeSignature::STRING_UTF8_MAX, + 
TypeSignature::BUFFER_MAX, ], Box::new(Value::Sequence(SequenceData::String(CharType::ASCII( ASCIIData { @@ -1471,7 +1471,7 @@ fn test_hash_errors() { vec![ TypeSignature::IntType, TypeSignature::UIntType, - TypeSignature::max_buffer().unwrap(), + TypeSignature::BUFFER_MAX, ], Box::new(Value::Bool(true)), ) @@ -1480,7 +1480,7 @@ fn test_hash_errors() { vec![ TypeSignature::IntType, TypeSignature::UIntType, - TypeSignature::max_buffer().unwrap(), + TypeSignature::BUFFER_MAX, ], Box::new(Value::Bool(true)), ) @@ -1489,7 +1489,7 @@ fn test_hash_errors() { vec![ TypeSignature::IntType, TypeSignature::UIntType, - TypeSignature::max_buffer().unwrap(), + TypeSignature::BUFFER_MAX, ], Box::new(Value::Bool(true)), ) @@ -1498,7 +1498,7 @@ fn test_hash_errors() { vec![ TypeSignature::IntType, TypeSignature::UIntType, - TypeSignature::max_buffer().unwrap(), + TypeSignature::BUFFER_MAX, ], Box::new(Value::Bool(true)), ) @@ -1508,7 +1508,7 @@ fn test_hash_errors() { vec![ TypeSignature::IntType, TypeSignature::UIntType, - TypeSignature::max_buffer().unwrap(), + TypeSignature::BUFFER_MAX, ], Box::new(Value::Bool(true)), ) diff --git a/clarity/src/vm/types/mod.rs b/clarity/src/vm/types/mod.rs index 0986e160d57..3ef4d04b739 100644 --- a/clarity/src/vm/types/mod.rs +++ b/clarity/src/vm/types/mod.rs @@ -32,8 +32,7 @@ use crate::vm::errors::CheckErrorKind; pub use crate::vm::types::signatures::{ parse_name_type_pairs, AssetIdentifier, BufferLength, FixedFunction, FunctionArg, FunctionSignature, FunctionType, ListTypeData, SequenceSubtype, StringSubtype, - StringUTF8Length, TupleTypeSignature, TypeSignature, TypeSignatureExt, BUFF_1, BUFF_20, - BUFF_21, BUFF_32, BUFF_33, BUFF_64, BUFF_65, + StringUTF8Length, TupleTypeSignature, TypeSignature, TypeSignatureExt, }; use crate::vm::ClarityVersion; @@ -81,7 +80,9 @@ impl BlockInfoProperty { use self::BlockInfoProperty::*; match self { Time | MinerSpendWinner | MinerSpendTotal | BlockReward => TypeSignature::UIntType, - IdentityHeaderHash | VrfSeed | HeaderHash | BurnchainHeaderHash => BUFF_32.clone(), + IdentityHeaderHash | VrfSeed | HeaderHash | BurnchainHeaderHash => { + TypeSignature::BUFFER_32 + } MinerAddress => TypeSignature::PrincipalType, } } @@ -91,15 +92,15 @@ impl BurnBlockInfoProperty { pub fn type_result(&self) -> std::result::Result { use self::BurnBlockInfoProperty::*; let result = match self { - HeaderHash => BUFF_32.clone(), + HeaderHash => TypeSignature::BUFFER_32, PoxAddrs => TupleTypeSignature::try_from(vec![ ( "addrs".into(), TypeSignature::list_of( TypeSignature::TupleType( TupleTypeSignature::try_from(vec![ - ("version".into(), BUFF_1.clone()), - ("hashbytes".into(), BUFF_32.clone()), + ("version".into(), TypeSignature::BUFFER_1), + ("hashbytes".into(), TypeSignature::BUFFER_32), ]) .map_err(|_| { CheckErrorKind::Expects( @@ -127,7 +128,7 @@ impl StacksBlockInfoProperty { use self::StacksBlockInfoProperty::*; match self { Time => TypeSignature::UIntType, - IndexHeaderHash | HeaderHash => BUFF_32.clone(), + IndexHeaderHash | HeaderHash => TypeSignature::BUFFER_32, } } } @@ -137,7 +138,7 @@ impl TenureInfoProperty { use self::TenureInfoProperty::*; match self { Time | MinerSpendWinner | MinerSpendTotal | BlockReward => TypeSignature::UIntType, - VrfSeed | BurnchainHeaderHash => BUFF_32.clone(), + VrfSeed | BurnchainHeaderHash => TypeSignature::BUFFER_32, MinerAddress => TypeSignature::PrincipalType, } } diff --git a/clarity/src/vm/types/signatures.rs b/clarity/src/vm/types/signatures.rs index 4d35554ca52..aadf25f846d 100644 --- 
a/clarity/src/vm/types/signatures.rs +++ b/clarity/src/vm/types/signatures.rs @@ -19,9 +19,7 @@ use std::fmt; pub use clarity_types::types::signatures::{ AssetIdentifier, BufferLength, CallableSubtype, ListTypeData, SequenceSubtype, StringSubtype, - StringUTF8Length, TupleTypeSignature, TypeSignature, ASCII_40, BUFF_1, BUFF_16, BUFF_20, - BUFF_21, BUFF_32, BUFF_33, BUFF_64, BUFF_65, MAX_TO_ASCII_BUFFER_LEN, TO_ASCII_MAX_BUFF, - TO_ASCII_RESPONSE_STRING, UTF8_40, + StringUTF8Length, TupleTypeSignature, TypeSignature, }; pub use clarity_types::types::Value; use stacks_common::types::StacksEpochId; diff --git a/contrib/stacks-inspect/src/lib.rs b/contrib/stacks-inspect/src/lib.rs index 444a016dbd6..4557efbba18 100644 --- a/contrib/stacks-inspect/src/lib.rs +++ b/contrib/stacks-inspect/src/lib.rs @@ -120,7 +120,7 @@ pub fn drain_common_opts(argv: &mut Vec, start_at: usize) -> CommonOpts /// /// Arguments: /// - `argv`: Args in CLI format: ` [args...]` -pub fn command_replay_block(argv: &[String], conf: Option<&Config>) { +pub fn command_validate_block(argv: &[String], conf: Option<&Config>) { let print_help_and_exit = || -> ! { let n = &argv[0]; eprintln!("Usage:"); @@ -202,7 +202,7 @@ pub fn command_replay_block(argv: &[String], conf: Option<&Config>) { /// /// Arguments: /// - `argv`: Args in CLI format: ` [args...]` -pub fn command_replay_block_nakamoto(argv: &[String], conf: Option<&Config>) { +pub fn command_validate_block_nakamoto(argv: &[String], conf: Option<&Config>) { let print_help_and_exit = || -> ! { let n = &argv[0]; eprintln!("Usage:"); diff --git a/contrib/stacks-inspect/src/main.rs b/contrib/stacks-inspect/src/main.rs index 5743a8fc586..e225149bb97 100644 --- a/contrib/stacks-inspect/src/main.rs +++ b/contrib/stacks-inspect/src/main.rs @@ -21,8 +21,8 @@ use clarity::types::StacksEpochId; use clarity::types::chainstate::StacksPrivateKey; use clarity_cli::DEFAULT_CLI_EPOCH; use stacks_inspect::{ - command_contract_hash, command_replay_block, command_replay_block_nakamoto, - command_replay_mock_mining, command_try_mine, drain_common_opts, + command_contract_hash, command_replay_mock_mining, command_try_mine, command_validate_block, + command_validate_block_nakamoto, drain_common_opts, }; use stackslib::chainstate::stacks::miner::BlockBuilderSettings; use stackslib::chainstate::stacks::{ @@ -1586,13 +1586,13 @@ check if the associated microblocks can be downloaded return; } - if argv[1] == "replay-block" { - command_replay_block(&argv[1..], common_opts.config.as_ref()); + if argv[1] == "validate-block" { + command_validate_block(&argv[1..], common_opts.config.as_ref()); process::exit(0); } - if argv[1] == "replay-naka-block" { - command_replay_block_nakamoto(&argv[1..], common_opts.config.as_ref()); + if argv[1] == "validate-naka-block" { + command_validate_block_nakamoto(&argv[1..], common_opts.config.as_ref()); process::exit(0); } diff --git a/contrib/tools/block-replay.sh b/contrib/tools/block-validation.sh similarity index 80% rename from contrib/tools/block-replay.sh rename to contrib/tools/block-validation.sh index 15f2d468432..397814d0d75 100755 --- a/contrib/tools/block-replay.sh +++ b/contrib/tools/block-validation.sh @@ -2,33 +2,28 @@ set -o pipefail -## Using 10 cpu cores, a full replay will take between 12-14 hours (assuming there are no other cpu/io bound processes running at the same time) +## Using 10 cpu cores, a full validation will take between 12-14 hours (assuming there are no other cpu/io bound processes running at the same time) ## ## ** Recommend to run this 
script in screen or tmux ** ## -## We'll need ~73GB per slice, plus an extra ~400GB for the chainstate archive and marf DB -## as of 02/2025: -## for 10 slices, this is about 1.1TB -## - 149GB for compressed chainstate -## - 232GB decompressed marf db -## - 73GB per slice dir (1 dir per cpu) -## for 15 slices, this is about 1.46TB -## for 20 slices, this is about 1.8TB +## We'll need ~217GB per slice, plus an extra ~4500GB for the chainstate archive and marf DB +## as of 09/2025: +## for 10 slices, this is about 2.5TB -NETWORK="mainnet" ## network to replay -REPO_DIR="$HOME/stacks-core" ## where to build the source -REMOTE_REPO="stacks-network/stacks-core" ## remote git repo to build stacks-inspect from -SCRATCH_DIR="$HOME/scratch" ## root folder for the replay slices -TIMESTAMP=$(date +%Y-%m-%d-%s) ## use a simple date format year-month-day-epoch -LOG_DIR="$HOME/replay_${TIMESTAMP}" ## location of logfiles for the replay -SLICE_DIR="${SCRATCH_DIR}/slice" ## location of slice dirs -TMUX_SESSION="replay" ## tmux session name to run the replay -TERM_OUT=false ## terminal friendly output -TESTING=false ## only run a replay on a few thousand blocks -BRANCH="develop" ## default branch to build stacks-inspect from -CORES=$(grep -c processor /proc/cpuinfo) ## retrieve total number of CORES on the system -RESERVED=8 ## reserve this many CORES for other processes as default -LOCAL_CHAINSTATE= ## path to local chainstate to use instead of snapshot download +NETWORK="mainnet" ## network to validate +REPO_DIR="$HOME/stacks-core" ## where to build the source +REMOTE_REPO="stacks-network/stacks-core" ## remote git repo to build stacks-inspect from +SCRATCH_DIR="$HOME/scratch" ## root folder for the validation slices +TIMESTAMP=$(date +%Y-%m-%d-%s) ## use a simple date format year-month-day-epoch +LOG_DIR="$HOME/block-validation_${TIMESTAMP}" ## location of logfiles for the validation +SLICE_DIR="${SCRATCH_DIR}/slice" ## location of slice dirs +TMUX_SESSION="validation" ## tmux session name to run the validation +TERM_OUT=false ## terminal friendly output +TESTING=false ## only run a validation on a few thousand blocks +BRANCH="develop" ## default branch to build stacks-inspect from +CORES=$(grep -c processor /proc/cpuinfo) ## retrieve total number of CORES on the system +RESERVED=8 ## reserve this many CORES for other processes as default +LOCAL_CHAINSTATE= ## path to local chainstate to use instead of snapshot download ## ansi color codes for terminal output COLRED=$'\033[31m' ## Red @@ -66,15 +61,15 @@ build_stacks_inspect() { } else echo "Cloning stacks-core ${BRANCH}" - (git clone "https://github.com/${REMOTE_REPO}" --branch "${BRANCH}" "${REPO_DIR}" && cd "${REPO_DIR}") || { + (git clone "https://github.com/${REMOTE_REPO}" --branch "${BRANCH}" "${REPO_DIR}" && cd "${REPO_DIR}") || { echo "${COLRED}Error${COLRESET} cloning https://github.com/${REMOTE_REPO} into ${REPO_DIR}" exit 1 } fi git pull - ## build stacks-inspect to: $HOME/stacks-inspect/target/release/stacks-inspect + ## build stacks-inspect to: ${REPO_DIR}/target/release/stacks-inspect echo "Building stacks-inspect binary" - cargo build --bin=stacks-inspect --release || { + cd contrib/stacks-inspect && cargo build --bin=stacks-inspect --release || { echo "${COLRED}Error${COLRESET} building stacks-inspect binary" exit 1 } @@ -82,7 +77,7 @@ build_stacks_inspect() { } ## create the slice dirs from an chainstate archive (symlinking marf.sqlite.blobs), 1 dir per CPU -configure_replay_slices() { +configure_validation_slices() { if [ -d 
"$HOME/scratch" ]; then echo "Deleting existing scratch dir: ${COLYELLOW}$HOME/scratch${COLRESET}" rm -rf "${HOME}/scratch" || { @@ -134,9 +129,9 @@ configure_replay_slices() { } ## setup the tmux sessions and create the logdir for storing output -setup_replay() { +setup_validation() { ## if there is an existing folder, rm it - if [ -d "${LOG_DIR}" ];then + if [ -d "${LOG_DIR}" ];then echo "Removing logdir ${LOG_DIR}" rm -rf "${LOG_DIR}" fi @@ -145,7 +140,7 @@ setup_replay() { echo "Creating logdir ${LOG_DIR}" mkdir -p "${LOG_DIR}" fi - ## if tmux session "replay" exists, kill it and start anew + ## if tmux session "${TMUX_SESSION}" exists, kill it and start anew if eval "tmux list-windows -t ${TMUX_SESSION} &> /dev/null"; then echo "Killing existing tmux session: ${TMUX_SESSION}" eval "tmux kill-session -t ${TMUX_SESSION} &> /dev/null" @@ -165,9 +160,9 @@ setup_replay() { return 0 } -## run the block replay -start_replay() { - local mode=$1 +## run the block validation +start_validation() { + local mode=$1 local total_blocks=0 local starting_block=0 local inspect_command @@ -177,11 +172,11 @@ start_replay() { ## nakamoto blocks echo "Mode: ${COLYELLOW}${mode}${COLRESET}" local log_append="_${mode}" - inspect_command="replay-naka-block" + inspect_command="validate-naka-block" ## get the total number of nakamoto blocks in db total_blocks=$(echo "select count(*) from nakamoto_block_headers" | sqlite3 "${SLICE_DIR}"0/chainstate/vm/index.sqlite) starting_block=0 # for the block counter, start at this block - ## use these values if `--testing` arg is provided (only replay 1_000 blocks) + ## use these values if `--testing` arg is provided (only validate 1_000 blocks) ${TESTING} && total_blocks=301883 ${TESTING} && starting_block=300883 ;; @@ -189,21 +184,21 @@ start_replay() { ## pre-nakamoto blocks echo "Mode: ${COLYELLOW}pre-nakamoto${COLRESET}" local log_append="" - inspect_command="replay-block" + inspect_command="validate-block" ## get the total number of blocks (with orphans) in db total_blocks=$(echo "select count(*) from staging_blocks where orphaned = 0" | sqlite3 "${SLICE_DIR}"0/chainstate/vm/index.sqlite) starting_block=0 # for the block counter, start at this block - ## use these values if `--testing` arg is provided (only replay 1_000 blocks) Note: 2.5 epoch is at 153106 + ## use these values if `--testing` arg is provided (only validate 1_000 blocks) Note: 2.5 epoch is at 153106 ${TESTING} && total_blocks=153000 ${TESTING} && starting_block=152000 ;; esac - local block_diff=$((total_blocks - starting_block)) ## how many blocks are being replayed - local slices=$((CORES - RESERVED)) ## how many replay slices to use - local slice_blocks=$((block_diff / slices)) ## how many blocks to replay per slice + local block_diff=$((total_blocks - starting_block)) ## how many blocks are being validated + local slices=$((CORES - RESERVED)) ## how many validation slices to use + local slice_blocks=$((block_diff / slices)) ## how many blocks to validate per slice ${TESTING} && echo "${COLRED}Testing: ${TESTING}${COLRESET}" echo "Total blocks: ${COLYELLOW}${total_blocks}${COLRESET}" - echo "Staring Block: ${COLYELLOW}$starting_block${COLRESET}" + echo "Starting Block: ${COLYELLOW}$starting_block${COLRESET}" echo "Block diff: ${COLYELLOW}$block_diff${COLRESET}" echo "******************************************************" echo "Total slices: ${COLYELLOW}${slices}${COLRESET}" @@ -215,9 +210,9 @@ start_replay() { if [[ "${end_block_count}" -gt "${total_blocks}" ]] || [[ "${slice_counter}" -eq $((slices 
- 1)) ]]; then end_block_count="${total_blocks}" fi - if [ "${mode}" != "nakamoto" ]; then ## don't create the tmux windows if we're replaying nakamoto blocks (they should already exist). TODO: check if it does exist in case the function call order changes + if [ "${mode}" != "nakamoto" ]; then ## don't create the tmux windows if we're validating nakamoto blocks (they should already exist). TODO: check if it does exist in case the function call order changes if [ "${slice_counter}" -gt 0 ];then - tmux new-window -t replay -d -n "slice${slice_counter}" || { + tmux new-window -t "${TMUX_SESSION}" -d -n "slice${slice_counter}" || { echo "${COLRED}Error${COLRESET} creating tmux window ${COLYELLOW}slice${slice_counter}${COLRESET}" exit 1 } @@ -226,12 +221,12 @@ start_replay() { local log_file="${LOG_DIR}/slice${slice_counter}${log_append}.log" local log=" | tee -a ${log_file}" local cmd="${REPO_DIR}/target/release/stacks-inspect --config ${REPO_DIR}/stackslib/conf/${NETWORK}-follower-conf.toml ${inspect_command} ${SLICE_DIR}${slice_counter} index-range $start_block_count $end_block_count 2>/dev/null" - echo " Creating tmux window: ${COLGREEN}replay:slice${slice_counter}${COLRESET} :: Blocks: ${COLYELLOW}${start_block_count}-${end_block_count}${COLRESET} || Logging to: ${log_file}" + echo " Creating tmux window: ${COLGREEN}${TMUX_SESSION}:slice${slice_counter}${COLRESET} :: Blocks: ${COLYELLOW}${start_block_count}-${end_block_count}${COLRESET} || Logging to: ${log_file}" echo "Command: ${cmd}" > "${log_file}" ## log the command being run for the slice - echo "Replaying indexed blocks: ${start_block_count}-${end_block_count} (out of ${total_blocks})" >> "${log_file}" - ## send `cmd` to the tmux window where the replay will run + echo "Validating indexed blocks: ${start_block_count}-${end_block_count} (out of ${total_blocks})" >> "${log_file}" + ## send `cmd` to the tmux window where the validation will run tmux send-keys -t "${TMUX_SESSION}:slice${slice_counter}" "${cmd}${log}" Enter || { - echo "${COLRED}Error${COLRESET} sending replay command to tmux window ${COLYELLOW}slice${slice_counter}${COLRESET}" + echo "${COLRED}Error${COLRESET} sending stacks-inspect command to tmux window ${COLYELLOW}slice${slice_counter}${COLRESET}" exit 1 } ## log the return code as the last line @@ -258,12 +253,12 @@ check_progress() { sleep 1 done echo "************************************************************************" - echo "Checking Block Replay status" + echo "Checking Block Validation status" echo -e ' ' while true; do count=$(pgrep -c "stacks-inspect") if [ "${count}" -gt 0 ]; then - ${TERM_OUT} && printf "Block replay processes are currently active [ %s%s%s%s ] ... \b${sp:progress++%${#sp}:1} \033[0K\r" "${COLYELLOW}" "${COLBOLD}" "${count}" "${COLRESET}" + ${TERM_OUT} && printf "Block validation processes are currently active [ %s%s%s%s ] ... \b${sp:progress++%${#sp}:1} \033[0K\r" "${COLYELLOW}" "${COLBOLD}" "${count}" "${COLRESET}" else ${TERM_OUT} && printf "\r\n" break @@ -302,10 +297,10 @@ store_results() { return_code=$(tail -1 "${file}") case ${return_code} in 0) - # block replay ran successfully + # block validation ran successfully ;; 1) - # block replay had some block failures + # block validation had some block failures failed=1 ;; *) @@ -355,10 +350,10 @@ store_results() {
_EOF_ - ## use the $failed var here in case there is a panic, then $failure_count may show zero, but the replay was not successful + ## use the $failed var here in case there is a panic, then $failure_count may show zero, but the validation was not successful if [ ${failed} == "1" ];then output=$(grep -r -h "Failed processing block" slice*.log) - IFS=$'\n' + IFS=$'\n' for line in ${output}; do echo "
${line}
" >> "${results_html}" || { echo "${COLRED}Error${COLRESET} writing failure to: ${results_html}" @@ -382,12 +377,12 @@ usage() { echo " ${COLBOLD}${0}${COLRESET}" echo " ${COLYELLOW}--testing${COLRESET}: only check a small number of blocks" echo " ${COLYELLOW}-t|--terminal${COLRESET}: more terminal friendly output" - echo " ${COLYELLOW}-n|--network${COLRESET}: run block replay against specific network (default: mainnet)" + echo " ${COLYELLOW}-n|--network${COLRESET}: run block validation against specific network (default: mainnet)" echo " ${COLYELLOW}-b|--branch${COLRESET}: branch of stacks-core to build stacks-inspect from (default: develop)" echo " ${COLYELLOW}-c|--chainstate${COLRESET}: local chainstate copy to use instead of downloading a chainstaet snapshot" echo " ${COLYELLOW}-l|--logdir${COLRESET}: use existing log directory" echo " ${COLYELLOW}-r|--reserved${COLRESET}: how many cpu cores to reserve for system tasks" - echo + echo echo " ex: ${COLCYAN}${0} -t -u ${COLRESET}" echo exit 0 @@ -447,11 +442,11 @@ done while [ ${#} -gt 0 ]; do case ${1} in --testing) - # only replay 1_000 blocks + # only validate 1_000 blocks TESTING=true ;; -t|--terminal) - # update terminal with progress (it's just printf to show in real-time that the replays are running) + # update terminal with progress (it's just printf to show in real-time that the validations are running) TERM_OUT=true ;; -n|--network) @@ -490,16 +485,16 @@ while [ ${#} -gt 0 ]; do LOG_DIR="${2}" shift ;; - -r|--RESERVED) + -r|--RESERVED) # reserve this many cpus for the system (default is 10) - if [ "${2}" == "" ]; then + if [ "${2}" == "" ]; then echo "Missing required value for ${1}" fi if ! [[ "$2" =~ ^[0-9]+$ ]]; then echo "ERROR: arg ($2) is not a number." >&2 exit 1 fi - RESERVED=${2} + RESERVED=${2} shift ;; -h|--help|--usage) @@ -513,11 +508,11 @@ done ## clear display before starting tput reset -echo "Replay Started: ${COLYELLOW}$(date)${COLRESET}" -build_stacks_inspect ## comment if using an existing chainstate/slice dir (ex: replay was performed already, and a second run is desired) -configure_replay_slices ## comment if using an existing chainstate/slice dir (ex: replay was performed already, and a second run is desired) -setup_replay ## configure logdir and tmux sessions -start_replay ## replay pre-nakamoto blocks (2.x) -start_replay nakamoto ## replay nakamoto blocks -store_results ## store aggregated results of replay -echo "Replay finished: $(date)" +echo "Validation Started: ${COLYELLOW}$(date)${COLRESET}" +build_stacks_inspect ## comment if using an existing chainstate/slice dir (ex: validation was performed already, and a second run is desired) +configure_validation_slices ## comment if using an existing chainstate/slice dir (ex: validation was performed already, and a second run is desired) +setup_validation ## configure logdir and tmux sessions +start_validation ## validate pre-nakamoto blocks (2.x) +start_validation nakamoto ## validate nakamoto blocks +store_results ## store aggregated results of validation +echo "Validation finished: $(date)" diff --git a/docs/release-process.md b/docs/release-process.md index 00928e48a01..0e87ce60135 100644 --- a/docs/release-process.md +++ b/docs/release-process.md @@ -58,7 +58,7 @@ The timing of the next Stacking cycle can be found [here](https://stx.eco/dao/to - A label should be applied to each such issue/PR as `X.Y.Z.A.n-blocker`. -3. Perform a [block-replay](../contrib/tools/block-replay.sh) using an existing chainstate, or sync from genesis +3. 
Perform a [block-validation](../contrib/tools/block-validation.sh) using an existing chainstate, or sync from genesis 4. Since development is continuing in the `develop` branch, it may be necessary to cherry-pick some commits into the release branch or open a PR against the release branch. diff --git a/stacks-node/src/burnchains/bitcoin_regtest_controller.rs b/stacks-node/src/burnchains/bitcoin_regtest_controller.rs index 5a54d6121c0..506a75d2d67 100644 --- a/stacks-node/src/burnchains/bitcoin_regtest_controller.rs +++ b/stacks-node/src/burnchains/bitcoin_regtest_controller.rs @@ -93,7 +93,10 @@ pub struct BitcoinRegtestController { burnchain_config: Option, ongoing_block_commit: Option, should_keep_running: Option>, - rpc_client: BitcoinRpcClient, + /// Optional Bitcoin RPC client used to interact with a `bitcoind` node. + /// - For a **miner** node, this field must always be `Some`. + /// - For any **other** node (e.g. a follower node), this field is `None`. + rpc_client: Option<BitcoinRpcClient>, } #[derive(Clone)] @@ -371,8 +374,7 @@ impl BitcoinRegtestController { should_keep_running: should_keep_running.clone(), }; - let rpc_client = BitcoinRpcClient::from_stx_config(&config) - .expect("unable to instantiate the RPC client!"); + let rpc_client = Self::create_rpc_client_unchecked(&config); Self { use_coordinator: coordinator_channel, @@ -421,8 +423,7 @@ impl BitcoinRegtestController { should_keep_running: None, }; - let rpc_client = BitcoinRpcClient::from_stx_config(&config) - .expect("unable to instantiate the RPC client!"); + let rpc_client = Self::create_rpc_client_unchecked(&config); Self { use_coordinator: None, @@ -477,6 +478,32 @@ impl BitcoinRegtestController { } } + /// Attempt to create a new [`BitcoinRpcClient`] from the given [`Config`]. + /// + /// If the provided config indicates that the node is a **miner**, + /// instantiates the client, **panicking** if instantiation fails. + /// If the node is **not** a miner (e.g. a follower node), returns `None`. + fn create_rpc_client_unchecked(config: &Config) -> Option<BitcoinRpcClient> { + config.node.miner.then(|| { + BitcoinRpcClient::from_stx_config(&config) + .expect("unable to instantiate the RPC client for miner node!") + }) + } + + /// Attempt to get a reference to the underlying [`BitcoinRpcClient`]. + /// + /// This function will panic if the RPC client has not been configured + /// (i.e. [`Self::create_rpc_client_unchecked`] returned `None` during initialization), + /// but an attempt is made to use it anyway. + /// + /// In practice, this means the node is expected to act as a miner, + /// yet no [`BitcoinRpcClient`] was created or properly configured. + fn get_rpc_client(&self) -> &BitcoinRpcClient { + self.rpc_client + .as_ref() + .expect("BUG: BitcoinRpcClient is required, but it has not been configured properly!") + } + /// Helium (devnet) blocks receiver. Returns the new burnchain tip. fn receive_blocks_helium(&mut self) -> BurnchainTip { let mut burnchain = self.get_burnchain(); @@ -686,7 +713,7 @@ impl BitcoinRegtestController { /// Retrieve all loaded wallets. pub fn list_wallets(&self) -> BitcoinRegtestControllerResult> { - Ok(self.rpc_client.list_wallets()?) + Ok(self.get_rpc_client().list_wallets()?) } /// Checks if the config-supplied wallet exists. @@ -695,7 +722,7 @@ impl BitcoinRegtestController { let wallets = self.list_wallets()?; let wallet = self.get_wallet_name(); if !wallets.contains(wallet) { - self.rpc_client.create_wallet(wallet, Some(true))?
} Ok(()) } @@ -1861,7 +1888,7 @@ impl BitcoinRegtestController { const UNCAPPED_FEE: f64 = 0.0; const MAX_BURN_AMOUNT: u64 = 1_000_000; - self.rpc_client + self.get_rpc_client() .send_raw_transaction(tx, Some(UNCAPPED_FEE), Some(MAX_BURN_AMOUNT)) .map(|txid| { debug!("Transaction {txid} sent successfully"); @@ -1933,7 +1960,9 @@ impl BitcoinRegtestController { .expect("FATAL: invalid public key bytes"); let address = self.get_miner_address(StacksEpochId::Epoch21, &public_key); - let result = self.rpc_client.generate_to_address(num_blocks, &address); + let result = self + .get_rpc_client() + .generate_to_address(num_blocks, &address); /* Temporary: not using `BitcoinRpcClientResultExt::ok_or_log_panic` (test code related), because we need this logic available outside `#[cfg(test)]` due to Helium network. @@ -1966,7 +1995,7 @@ impl BitcoinRegtestController { .expect("FATAL: invalid public key bytes"); let address = self.get_miner_address(StacksEpochId::Epoch21, &public_key); - self.rpc_client + self.get_rpc_client() .generate_block(&address, &[]) .ok_or_log_panic("generating block") } @@ -1975,7 +2004,7 @@ impl BitcoinRegtestController { #[cfg(test)] pub fn invalidate_block(&self, block: &BurnchainHeaderHash) { info!("Invalidating block {block}"); - self.rpc_client + self.get_rpc_client() .invalidate_block(block) .ok_or_log_panic("invalidate block") } @@ -1983,7 +2012,7 @@ impl BitcoinRegtestController { /// Retrieve the hash (as a [`BurnchainHeaderHash`]) of the block at the given height. #[cfg(test)] pub fn get_block_hash(&self, height: u64) -> BurnchainHeaderHash { - self.rpc_client + self.get_rpc_client() .get_block_hash(height) .unwrap_or_log_panic("retrieve block") } @@ -2041,7 +2070,7 @@ impl BitcoinRegtestController { /// Retrieves a raw [`Transaction`] by its [`Txid`] #[cfg(test)] pub fn get_raw_transaction(&self, txid: &Txid) -> Transaction { - self.rpc_client + self.get_rpc_client() .get_raw_transaction(txid) .unwrap_or_log_panic("retrieve raw tx") } @@ -2069,7 +2098,7 @@ impl BitcoinRegtestController { "Generate to address '{address}' for public key '{}'", &pks[0].to_hex() ); - self.rpc_client + self.get_rpc_client() .generate_to_address(num_blocks, &address) .ok_or_log_panic("generating block"); return; @@ -2087,7 +2116,7 @@ impl BitcoinRegtestController { &pk.to_hex(), ); } - self.rpc_client + self.get_rpc_client() .generate_to_address(1, &address) .ok_or_log_panic("generating block"); } @@ -2105,7 +2134,7 @@ impl BitcoinRegtestController { /// * `false` if the transaction is unconfirmed or could not be found. 
pub fn is_transaction_confirmed(&self, txid: &Txid) -> bool { match self - .rpc_client + .get_rpc_client() .get_transaction(self.get_wallet_name(), txid) { Ok(info) => info.confirmations > 0, @@ -2158,7 +2187,7 @@ impl BitcoinRegtestController { ); let descriptor = format!("addr({address})"); - let info = self.rpc_client.get_descriptor_info(&descriptor)?; + let info = self.get_rpc_client().get_descriptor_info(&descriptor)?; let descr_req = ImportDescriptorsRequest { descriptor: format!("addr({address})#{}", info.checksum), @@ -2166,7 +2195,7 @@ impl BitcoinRegtestController { internal: Some(true), }; - self.rpc_client + self.get_rpc_client() .import_descriptors(self.get_wallet_name(), &[&descr_req])?; } Ok(()) @@ -2227,11 +2256,11 @@ impl BitcoinRegtestController { utxos_to_exclude: &Option, block_height: u64, ) -> BitcoinRpcClientResult { - let bhh = self.rpc_client.get_block_hash(block_height)?; + let bhh = self.get_rpc_client().get_block_hash(block_height)?; const MIN_CONFIRMATIONS: u64 = 0; const MAX_CONFIRMATIONS: u64 = 9_999_999; - let unspents = self.rpc_client.list_unspent( + let unspents = self.get_rpc_client().list_unspent( &self.get_wallet_name(), Some(MIN_CONFIRMATIONS), Some(MAX_CONFIRMATIONS), @@ -2420,6 +2449,7 @@ mod tests { use std::env::{self, temp_dir}; use std::fs::File; use std::io::Write; + use std::panic::{self, AssertUnwindSafe}; use stacks::burnchains::BurnchainSigner; use stacks::config::DEFAULT_SATS_PER_VB; @@ -2430,7 +2460,9 @@ mod tests { use super::*; use crate::burnchains::bitcoin::core_controller::BitcoinCoreController; - use crate::burnchains::bitcoin_regtest_controller::tests::utils::to_address_legacy; + use crate::burnchains::bitcoin_regtest_controller::tests::utils::{ + create_follower_config, create_miner_config, to_address_legacy, + }; use crate::Keychain; mod utils { @@ -2444,8 +2476,9 @@ mod tests { use crate::burnchains::bitcoin::core_controller::BURNCHAIN_CONFIG_PEER_PORT_DISABLED; use crate::util::get_epoch_time_nanos; - pub fn create_config() -> Config { + pub fn create_miner_config() -> Config { let mut config = Config::default(); + config.node.miner = true; config.burnchain.magic_bytes = "T3".as_bytes().into(); config.burnchain.username = Some(String::from("user")); config.burnchain.password = Some(String::from("12345")); @@ -2683,6 +2716,18 @@ mod tests { burn_header_hash: BurnchainHeaderHash([0u8; 32]), } } + + pub fn create_follower_config() -> Config { + let mut config = Config::default(); + config.node.miner = false; + config.burnchain.magic_bytes = "T3".as_bytes().into(); + config.burnchain.username = None; + config.burnchain.password = None; + config.burnchain.peer_host = String::from("127.0.0.1"); + config.burnchain.peer_port = 8333; + config.node.working_dir = format!("/tmp/follower"); + config + } } #[test] @@ -2738,7 +2783,7 @@ mod tests { ]; // test serialize_tx() - let config = utils::create_config(); + let config = utils::create_miner_config(); let mut btc_controller = BitcoinRegtestController::new(config, None); let mut utxo_set = UTXOSet { @@ -2857,7 +2902,7 @@ mod tests { #[test] fn test_to_epoch_aware_pubkey() { - let mut config = utils::create_config(); + let mut config = utils::create_miner_config(); let pubkey = utils::create_miner1_pubkey(); config.miner.segwit = false; @@ -2895,7 +2940,7 @@ mod tests { #[test] fn test_get_miner_address() { - let mut config = utils::create_config(); + let mut config = utils::create_miner_config(); let pub_key = utils::create_miner1_pubkey(); config.miner.segwit = false; @@ -2933,6 +2978,86 
@@ mod tests { ); } + #[test] + fn test_instantiate_with_burnchain_on_follower_node_ok() { + let config = create_follower_config(); + + let btc_controller = BitcoinRegtestController::with_burnchain(config, None, None, None); + + let result = panic::catch_unwind(AssertUnwindSafe(|| { + _ = btc_controller.get_rpc_client(); + })); + assert!( + result.is_err(), + "Invoking any Bitcoin RPC related method should panic." + ); + } + + #[test] + fn test_instantiate_with_burnchain_on_miner_node_ok() { + let config = create_miner_config(); + + let btc_controller = BitcoinRegtestController::with_burnchain(config, None, None, None); + + // Invoking any Bitcoin RPC related method should work: get_rpc_client() panics otherwise. + let _ = btc_controller.get_rpc_client(); + } + + #[test] + fn test_instantiate_with_burnchain_on_miner_node_failure() { + let mut config = create_miner_config(); + config.burnchain.username = None; + config.burnchain.password = None; + + let result = panic::catch_unwind(|| { + _ = BitcoinRegtestController::with_burnchain(config, None, None, None); + }); + assert!( + result.is_err(), + "Bitcoin RPC credentials are mandatory for miner node." + ); + } + + #[test] + fn test_instantiate_new_dummy_on_follower_node_ok() { + let config = create_follower_config(); + + let btc_controller = BitcoinRegtestController::new_dummy(config); + + let result = panic::catch_unwind(AssertUnwindSafe(|| { + _ = btc_controller.get_rpc_client(); + })); + assert!( + result.is_err(), + "Invoking any Bitcoin RPC related method should panic." + ); + } + + #[test] + fn test_instantiate_new_dummy_on_miner_node_ok() { + let config = create_miner_config(); + + let btc_controller = BitcoinRegtestController::new_dummy(config); + + // Invoking any Bitcoin RPC related method should work: get_rpc_client() panics otherwise. + let _ = btc_controller.get_rpc_client(); + } + + #[test] + fn test_instantiate_new_dummy_on_miner_node_failure() { + let mut config = create_miner_config(); + config.burnchain.username = None; + config.burnchain.password = None; + + let result = panic::catch_unwind(|| { + _ = BitcoinRegtestController::new_dummy(config); + }); + assert!( + result.is_err(), + "Bitcoin RPC credentials are mandatory for miner node."
+ ); + } + #[test] #[ignore] fn test_create_wallet_from_default_empty_name() { @@ -2940,7 +3065,7 @@ mod tests { return; } - let config = utils::create_config(); + let config = utils::create_miner_config(); let mut btcd_controller = BitcoinCoreController::from_stx_config(&config); btcd_controller @@ -2964,7 +3089,11 @@ mod tests { #[test] #[ignore] fn test_create_wallet_from_custom_name() { - let mut config = utils::create_config(); + if env::var("BITCOIND_TEST") != Ok("1".into()) { + return; + } + + let mut config = utils::create_miner_config(); config.burnchain.wallet_name = String::from("mywallet"); let mut btcd_controller = BitcoinCoreController::from_stx_config(&config); @@ -2992,7 +3121,7 @@ mod tests { let miner_pubkey = utils::create_miner1_pubkey(); - let mut config = utils::create_config(); + let mut config = utils::create_miner_config(); config.burnchain.local_mining_public_key = Some(miner_pubkey.to_hex()); let mut btcd_controller = BitcoinCoreController::from_stx_config(&config); @@ -3020,7 +3149,7 @@ mod tests { let miner_pubkey = utils::create_miner1_pubkey(); - let mut config = utils::create_config(); + let mut config = utils::create_miner_config(); config.burnchain.local_mining_public_key = Some(miner_pubkey.to_hex()); let mut btcd_controller = BitcoinCoreController::from_stx_config(&config); @@ -3057,7 +3186,7 @@ mod tests { let miner_pubkey = utils::create_miner1_pubkey(); - let mut config = utils::create_config(); + let mut config = utils::create_miner_config(); config.burnchain.local_mining_public_key = Some(miner_pubkey.to_hex()); config.burnchain.max_unspent_utxos = Some(10); @@ -3085,7 +3214,7 @@ mod tests { let miner_pubkey = utils::create_miner1_pubkey(); - let mut config = utils::create_config(); + let mut config = utils::create_miner_config(); config.burnchain.local_mining_public_key = Some(miner_pubkey.to_hex()); let mut btcd_controller = BitcoinCoreController::from_stx_config(&config); @@ -3126,7 +3255,7 @@ mod tests { let miner1_pubkey = utils::create_miner1_pubkey(); let miner2_pubkey = utils::create_miner2_pubkey(); - let mut config = utils::create_config(); + let mut config = utils::create_miner_config(); config.burnchain.local_mining_public_key = Some(miner1_pubkey.to_hex()); let mut btcd_controller = BitcoinCoreController::from_stx_config(&config); @@ -3164,7 +3293,7 @@ mod tests { let miner_pubkey = utils::create_miner1_pubkey(); - let mut config = utils::create_config(); + let mut config = utils::create_miner_config(); config.burnchain.local_mining_public_key = Some(miner_pubkey.to_hex()); let mut btcd_controller = BitcoinCoreController::from_stx_config(&config); @@ -3212,7 +3341,7 @@ mod tests { let miner_pubkey = utils::create_miner1_pubkey(); - let mut config = utils::create_config(); + let mut config = utils::create_miner_config(); config.burnchain.local_mining_public_key = Some(miner_pubkey.to_hex()); let mut btcd_controller = BitcoinCoreController::from_stx_config(&config); @@ -3243,7 +3372,7 @@ mod tests { let miner_pubkey = utils::create_miner1_pubkey(); - let mut config = utils::create_config(); + let mut config = utils::create_miner_config(); config.burnchain.local_mining_public_key = Some(miner_pubkey.to_hex()); let mut btcd_controller = BitcoinCoreController::from_stx_config(&config); @@ -3271,7 +3400,7 @@ mod tests { let miner_pubkey = utils::create_miner1_pubkey(); - let mut config = utils::create_config(); + let mut config = utils::create_miner_config(); config.burnchain.local_mining_public_key = Some(miner_pubkey.to_hex()); let mut 
btcd_controller = BitcoinCoreController::from_stx_config(&config); @@ -3307,7 +3436,7 @@ mod tests { let miner_pubkey = utils::create_miner1_pubkey(); - let mut config = utils::create_config(); + let mut config = utils::create_miner_config(); config.burnchain.local_mining_public_key = Some(miner_pubkey.to_hex()); let mut btcd_controller = BitcoinCoreController::from_stx_config(&config); @@ -3337,7 +3466,7 @@ mod tests { let miner_pubkey = utils::create_miner1_pubkey(); - let config = utils::create_config(); + let config = utils::create_miner_config(); let mut btcd_controller = BitcoinCoreController::from_stx_config(&config); btcd_controller @@ -3366,7 +3495,7 @@ mod tests { let miner_pubkey = utils::create_miner1_pubkey(); - let config = utils::create_config(); + let config = utils::create_miner_config(); let mut btcd_controller = BitcoinCoreController::from_stx_config(&config); btcd_controller @@ -3400,7 +3529,7 @@ mod tests { let miner_pubkey = utils::create_miner1_pubkey(); - let mut config = utils::create_config(); + let mut config = utils::create_miner_config(); config.miner.segwit = true; let mut btcd_controller = BitcoinCoreController::from_stx_config(&config); @@ -3436,7 +3565,7 @@ mod tests { let miner_pubkey = keychain.get_pub_key(); let mut op_signer = keychain.generate_op_signer(); - let mut config = utils::create_config(); + let mut config = utils::create_miner_config(); config.burnchain.local_mining_public_key = Some(miner_pubkey.to_hex()); let mut btcd_controller = BitcoinCoreController::from_stx_config(&config); @@ -3495,7 +3624,7 @@ mod tests { let miner_pubkey = keychain.get_pub_key(); let mut op_signer = keychain.generate_op_signer(); - let mut config = utils::create_config(); + let mut config = utils::create_miner_config(); config.burnchain.local_mining_public_key = Some(miner_pubkey.to_hex()); let mut btcd_controller = BitcoinCoreController::from_stx_config(&config); @@ -3544,7 +3673,7 @@ mod tests { let miner_pubkey = keychain.get_pub_key(); let mut op_signer = keychain.generate_op_signer(); - let mut config = utils::create_config(); + let mut config = utils::create_miner_config(); config.burnchain.local_mining_public_key = Some(miner_pubkey.to_hex()); let mut btcd_controller = BitcoinCoreController::from_stx_config(&config); @@ -3595,7 +3724,7 @@ mod tests { let miner_pubkey = keychain.get_pub_key(); let mut op_signer = keychain.generate_op_signer(); - let mut config = utils::create_config(); + let mut config = utils::create_miner_config(); config.burnchain.local_mining_public_key = Some(miner_pubkey.to_hex()); let mut btcd_controller = BitcoinCoreController::from_stx_config(&config); @@ -3678,7 +3807,7 @@ mod tests { let miner_pubkey = keychain.get_pub_key(); let mut op_signer = keychain.generate_op_signer(); - let mut config = utils::create_config(); + let mut config = utils::create_miner_config(); config.burnchain.local_mining_public_key = Some(miner_pubkey.to_hex()); let mut btcd_controller = BitcoinCoreController::from_stx_config(&config); @@ -3751,7 +3880,7 @@ mod tests { let miner_pubkey = keychain.get_pub_key(); let mut op_signer = keychain.generate_op_signer(); - let mut config = utils::create_config(); + let mut config = utils::create_miner_config(); config.burnchain.local_mining_public_key = Some(miner_pubkey.to_hex()); let mut btcd_controller = BitcoinCoreController::from_stx_config(&config); @@ -3796,7 +3925,7 @@ mod tests { let miner_pubkey = keychain.get_pub_key(); let mut op_signer = keychain.generate_op_signer(); - let mut config = 
utils::create_config(); + let mut config = utils::create_miner_config(); config.burnchain.local_mining_public_key = Some(miner_pubkey.to_hex()); let mut btcd_controller = BitcoinCoreController::from_stx_config(&config); @@ -3846,7 +3975,7 @@ mod tests { let miner_pubkey = keychain.get_pub_key(); let mut op_signer = keychain.generate_op_signer(); - let mut config = utils::create_config(); + let mut config = utils::create_miner_config(); config.burnchain.local_mining_public_key = Some(miner_pubkey.to_hex()); let mut btcd_controller = BitcoinCoreController::from_stx_config(&config); @@ -3896,7 +4025,7 @@ mod tests { let miner_pubkey = keychain.get_pub_key(); let mut op_signer = keychain.generate_op_signer(); - let mut config = utils::create_config(); + let mut config = utils::create_miner_config(); config.burnchain.local_mining_public_key = Some(miner_pubkey.to_hex()); let mut btcd_controller = BitcoinCoreController::from_stx_config(&config); @@ -3932,7 +4061,7 @@ mod tests { let miner_pubkey = keychain.get_pub_key(); let mut op_signer = keychain.generate_op_signer(); - let mut config = utils::create_config(); + let mut config = utils::create_miner_config(); config.burnchain.local_mining_public_key = Some(miner_pubkey.to_hex()); let mut btcd_controller = BitcoinCoreController::from_stx_config(&config); @@ -3972,7 +4101,7 @@ mod tests { let miner_pubkey = keychain.get_pub_key(); let mut op_signer = keychain.generate_op_signer(); - let mut config = utils::create_config(); + let mut config = utils::create_miner_config(); config.burnchain.local_mining_public_key = Some(miner_pubkey.to_hex()); let mut btcd_controller = BitcoinCoreController::from_stx_config(&config); @@ -4017,7 +4146,7 @@ mod tests { let miner_pubkey = keychain.get_pub_key(); let mut op_signer = keychain.generate_op_signer(); - let mut config = utils::create_config(); + let mut config = utils::create_miner_config(); config.burnchain.local_mining_public_key = Some(miner_pubkey.to_hex()); let mut btcd_controller = BitcoinCoreController::from_stx_config(&config); @@ -4064,7 +4193,7 @@ mod tests { let miner_pubkey = keychain.get_pub_key(); let mut op_signer = keychain.generate_op_signer(); - let mut config = utils::create_config(); + let mut config = utils::create_miner_config(); config.burnchain.local_mining_public_key = Some(miner_pubkey.to_hex()); let mut btcd_controller = BitcoinCoreController::from_stx_config(&config); @@ -4097,7 +4226,7 @@ mod tests { let miner_pubkey = keychain.get_pub_key(); let mut op_signer = keychain.generate_op_signer(); - let mut config = utils::create_config(); + let mut config = utils::create_miner_config(); config.burnchain.local_mining_public_key = Some(miner_pubkey.to_hex()); let mut btcd_controller = BitcoinCoreController::from_stx_config(&config); @@ -4138,7 +4267,7 @@ mod tests { let miner_pubkey = keychain.get_pub_key(); let mut op_signer = keychain.generate_op_signer(); - let mut config = utils::create_config(); + let mut config = utils::create_miner_config(); config.burnchain.local_mining_public_key = Some(miner_pubkey.to_hex()); let mut btcd_controller = BitcoinCoreController::from_stx_config(&config); diff --git a/stacks-node/src/burnchains/rpc/bitcoin_rpc_client/mod.rs b/stacks-node/src/burnchains/rpc/bitcoin_rpc_client/mod.rs index 58eed7e3d02..bef521be80d 100644 --- a/stacks-node/src/burnchains/rpc/bitcoin_rpc_client/mod.rs +++ b/stacks-node/src/burnchains/rpc/bitcoin_rpc_client/mod.rs @@ -363,6 +363,10 @@ pub type BitcoinRpcClientResult = Result; impl BitcoinRpcClient { /// Create a 
[`BitcoinRpcClient`] from Stacks Configuration, mainly using [`stacks::config::BurnchainConfig`] + /// + /// # Notes + /// `username` and `password` configuration are mandatory (`bitcoind` requires authentication for rpc calls), + /// so a [`BitcoinRpcClientError::MissingCredentials`] is returned otherwise, pub fn from_stx_config(config: &Config) -> BitcoinRpcClientResult { let host = config.burnchain.peer_host.clone(); let port = config.burnchain.rpc_port; diff --git a/stacks-node/src/tests/neon_integrations.rs b/stacks-node/src/tests/neon_integrations.rs index 32a974f3096..b11bf78af94 100644 --- a/stacks-node/src/tests/neon_integrations.rs +++ b/stacks-node/src/tests/neon_integrations.rs @@ -6294,9 +6294,10 @@ fn antientropy_integration_test() { let burnchain_config = Burnchain::regtest(&conf_bootstrap_node.get_burn_db_path()); let target_height = 3 + (3 * burnchain_config.pox_constants.reward_cycle_length); + let conf_bootstrap_node_threaded = conf_bootstrap_node.clone(); let bootstrap_node_thread = thread::spawn(move || { let mut btc_regtest_controller = BitcoinRegtestController::with_burnchain( - conf_bootstrap_node.clone(), + conf_bootstrap_node_threaded.clone(), None, Some(burnchain_config.clone()), None, @@ -6306,7 +6307,7 @@ fn antientropy_integration_test() { eprintln!("Chain bootstrapped..."); - let mut run_loop = neon::RunLoop::new(conf_bootstrap_node.clone()); + let mut run_loop = neon::RunLoop::new(conf_bootstrap_node_threaded.clone()); let blocks_processed = run_loop.get_blocks_processed_arc(); let channel = run_loop.get_coordinator_channel().unwrap(); @@ -6397,7 +6398,7 @@ fn antientropy_integration_test() { ); let btc_regtest_controller = BitcoinRegtestController::with_burnchain( - conf_follower_node.clone(), + conf_bootstrap_node.clone(), None, Some(burnchain_config), None, diff --git a/stackslib/src/chainstate/mod.rs b/stackslib/src/chainstate/mod.rs index 3887650d8e8..0d848acf634 100644 --- a/stackslib/src/chainstate/mod.rs +++ b/stackslib/src/chainstate/mod.rs @@ -24,3 +24,5 @@ pub mod burn; pub mod coordinator; pub mod nakamoto; pub mod stacks; +#[cfg(test)] +pub mod tests; diff --git a/stackslib/src/chainstate/nakamoto/coordinator/tests.rs b/stackslib/src/chainstate/nakamoto/coordinator/tests.rs index 5d5108441f6..6eb9ad2b60c 100644 --- a/stackslib/src/chainstate/nakamoto/coordinator/tests.rs +++ b/stackslib/src/chainstate/nakamoto/coordinator/tests.rs @@ -60,6 +60,7 @@ use crate::chainstate::stacks::{ TokenTransferMemo, TransactionAnchorMode, TransactionAuth, TransactionPayload, TransactionSmartContract, TransactionVersion, }; +use crate::chainstate::tests::TestChainstateConfig; use crate::clarity::vm::types::StacksAddressExtensions; use crate::core::StacksEpochExtension; use crate::net::relay::{BlockAcceptResponse, Relayer}; @@ -176,8 +177,9 @@ pub fn boot_nakamoto<'a>( peer_config .stacker_dbs .push(boot_code_id(MINERS_NAME, false)); - peer_config.epochs = Some(StacksEpoch::unit_test_3_0_only(37)); - peer_config.initial_balances = vec![(addr.to_account_principal(), 1_000_000_000_000_000_000)]; + peer_config.chain_config.epochs = Some(StacksEpoch::unit_test_3_0_only(37)); + peer_config.chain_config.initial_balances = + vec![(addr.to_account_principal(), 1_000_000_000_000_000_000)]; // Create some balances for test Stackers let mut stacker_balances = test_stackers @@ -201,15 +203,40 @@ pub fn boot_nakamoto<'a>( }) .collect(); - peer_config.initial_balances.append(&mut stacker_balances); - peer_config.initial_balances.append(&mut signer_balances); - 
peer_config.initial_balances.append(&mut initial_balances); - peer_config.burnchain.pox_constants.v2_unlock_height = 21; - peer_config.burnchain.pox_constants.pox_3_activation_height = 26; - peer_config.burnchain.pox_constants.v3_unlock_height = 27; - peer_config.burnchain.pox_constants.pox_4_activation_height = 31; - peer_config.test_stackers = Some(test_stackers.to_vec()); - peer_config.test_signers = Some(test_signers.clone()); + peer_config + .chain_config + .initial_balances + .append(&mut stacker_balances); + peer_config + .chain_config + .initial_balances + .append(&mut signer_balances); + peer_config + .chain_config + .initial_balances + .append(&mut initial_balances); + peer_config + .chain_config + .burnchain + .pox_constants + .v2_unlock_height = 21; + peer_config + .chain_config + .burnchain + .pox_constants + .pox_3_activation_height = 26; + peer_config + .chain_config + .burnchain + .pox_constants + .v3_unlock_height = 27; + peer_config + .chain_config + .burnchain + .pox_constants + .pox_4_activation_height = 31; + peer_config.chain_config.test_stackers = Some(test_stackers.to_vec()); + peer_config.chain_config.test_signers = Some(test_signers.clone()); let mut peer = TestPeer::new_with_observer(peer_config, observer); advance_to_nakamoto(&mut peer, test_signers, test_stackers); @@ -220,13 +247,18 @@ pub fn boot_nakamoto<'a>( /// Make a replay peer, used for replaying the blockchain pub fn make_replay_peer<'a>(peer: &mut TestPeer<'a>) -> TestPeer<'a> { let mut replay_config = peer.config.clone(); - replay_config.test_name = format!("{}.replay", &peer.config.test_name); + replay_config.chain_config.test_name = + format!("{}.replay", &peer.config.chain_config.test_name); replay_config.server_port = 0; replay_config.http_port = 0; - replay_config.test_stackers = peer.config.test_stackers.clone(); - - let test_stackers = replay_config.test_stackers.clone().unwrap_or_default(); - let mut test_signers = replay_config.test_signers.clone().unwrap(); + replay_config.chain_config.test_stackers = peer.config.chain_config.test_stackers.clone(); + + let test_stackers = replay_config + .chain_config + .test_stackers + .clone() + .unwrap_or_default(); + let mut test_signers = replay_config.chain_config.test_signers.clone().unwrap(); let mut replay_peer = TestPeer::new(replay_config); let observer = TestEventObserver::new(); advance_to_nakamoto( @@ -237,12 +269,12 @@ pub fn make_replay_peer<'a>(peer: &mut TestPeer<'a>) -> TestPeer<'a> { // sanity check let replay_tip = { - let sort_db = replay_peer.sortdb.as_ref().unwrap(); + let sort_db = replay_peer.sortdb_ref(); let tip = SortitionDB::get_canonical_burn_chain_tip(sort_db.conn()).unwrap(); tip }; let tip = { - let sort_db = peer.sortdb.as_ref().unwrap(); + let sort_db = peer.sortdb_ref(); let tip = SortitionDB::get_canonical_burn_chain_tip(sort_db.conn()).unwrap(); let sort_ic = sort_db.index_conn(); let ancestor_tip = SortitionDB::get_ancestor_snapshot( @@ -330,7 +362,12 @@ fn replay_reward_cycle( stacks_blocks: &[NakamotoBlock], ) { eprintln!("\n\n=============================================\nBegin replay\n==============================================\n"); - let reward_cycle_length = peer.config.burnchain.pox_constants.reward_cycle_length as usize; + let reward_cycle_length = peer + .config + .chain_config + .burnchain + .pox_constants + .reward_cycle_length as usize; let reward_cycle_indices: Vec = (0..stacks_blocks.len()) .step_by(reward_cycle_length) .collect(); @@ -339,8 +376,8 @@ fn replay_reward_cycle( let (_, _, consensus_hash) 
= peer.next_burnchain_block(burn_ops.clone()); } - let sortdb = peer.sortdb.take().unwrap(); - let mut node = peer.stacks_node.take().unwrap(); + let sortdb = peer.chain.sortdb.take().unwrap(); + let mut node = peer.chain.stacks_node.take().unwrap(); let sort_tip = SortitionDB::get_canonical_sortition_tip(sortdb.conn()).unwrap(); let mut sort_handle = sortdb.index_handle(&sort_tip); @@ -353,7 +390,7 @@ fn replay_reward_cycle( info!("Process Nakamoto block {} ({:?}", &block_id, &block.header); let accepted = Relayer::process_new_nakamoto_block( - &peer.config.burnchain, + &peer.config.chain_config.burnchain, &sortdb, &mut sort_handle, &mut node.chainstate, @@ -367,7 +404,7 @@ fn replay_reward_cycle( )); if accepted.is_accepted() { test_debug!("Accepted Nakamoto block {block_id}"); - peer.coord.handle_new_nakamoto_stacks_block().unwrap(); + peer.chain.coord.handle_new_nakamoto_stacks_block().unwrap(); } else { test_debug!("Did NOT accept Nakamoto block {block_id}"); blocks_to_process.push(block); @@ -375,8 +412,8 @@ fn replay_reward_cycle( } } - peer.sortdb = Some(sortdb); - peer.stacks_node = Some(node); + peer.chain.sortdb = Some(sortdb); + peer.chain.stacks_node = Some(node); peer.check_nakamoto_migration(); } @@ -400,8 +437,8 @@ fn test_simple_nakamoto_coordinator_bootup() { tenure_change.tenure_consensus_hash = consensus_hash.clone(); tenure_change.burn_view_consensus_hash = consensus_hash.clone(); - let tenure_change_tx = peer.miner.make_nakamoto_tenure_change(tenure_change); - let coinbase_tx = peer.miner.make_nakamoto_coinbase(None, vrf_proof); + let tenure_change_tx = peer.chain.miner.make_nakamoto_tenure_change(tenure_change); + let coinbase_tx = peer.chain.miner.make_nakamoto_coinbase(None, vrf_proof); let blocks_and_sizes = peer.make_nakamoto_tenure( tenure_change_tx, @@ -414,8 +451,8 @@ fn test_simple_nakamoto_coordinator_bootup() { .map(|(block, _, _)| block) .collect(); - let chainstate = &mut peer.stacks_node.as_mut().unwrap().chainstate; - let sort_db = peer.sortdb.as_mut().unwrap(); + let chainstate = &mut peer.chain.stacks_node.as_mut().unwrap().chainstate; + let sort_db = peer.chain.sortdb.as_mut().unwrap(); let tip = NakamotoChainState::get_canonical_block_header(chainstate.db(), sort_db) .unwrap() .unwrap(); @@ -463,8 +500,8 @@ fn test_simple_nakamoto_coordinator_1_tenure_10_blocks() { tenure_change.tenure_consensus_hash = consensus_hash.clone(); tenure_change.burn_view_consensus_hash = consensus_hash.clone(); - let tenure_change_tx = peer.miner.make_nakamoto_tenure_change(tenure_change); - let coinbase_tx = peer.miner.make_nakamoto_coinbase(None, vrf_proof); + let tenure_change_tx = peer.chain.miner.make_nakamoto_tenure_change(tenure_change); + let coinbase_tx = peer.chain.miner.make_nakamoto_coinbase(None, vrf_proof); // do a stx transfer in each block to a given recipient let recipient_addr = @@ -502,8 +539,8 @@ fn test_simple_nakamoto_coordinator_1_tenure_10_blocks() { .collect(); let tip = { - let chainstate = &mut peer.stacks_node.as_mut().unwrap().chainstate; - let sort_db = peer.sortdb.as_mut().unwrap(); + let chainstate = &mut peer.chain.stacks_node.as_mut().unwrap().chainstate; + let sort_db = peer.chain.sortdb.as_mut().unwrap(); NakamotoChainState::get_canonical_block_header(chainstate.db(), sort_db) .unwrap() .unwrap() @@ -527,8 +564,8 @@ fn test_simple_nakamoto_coordinator_1_tenure_10_blocks() { replay_reward_cycle(&mut replay_peer, &[burn_ops], &blocks); let tip = { - let chainstate = &mut replay_peer.stacks_node.as_mut().unwrap().chainstate; - let sort_db 
= replay_peer.sortdb.as_mut().unwrap(); + let chainstate = &mut replay_peer.chain.stacks_node.as_mut().unwrap().chainstate; + let sort_db = replay_peer.chain.sortdb.as_mut().unwrap(); NakamotoChainState::get_canonical_block_header(chainstate.db(), sort_db) .unwrap() .unwrap() @@ -563,8 +600,8 @@ impl TestPeer<'_> { G: FnMut(&mut NakamotoBlock) -> bool, { let nakamoto_tip = { - let chainstate = &mut self.stacks_node.as_mut().unwrap().chainstate; - let sort_db = self.sortdb.as_mut().unwrap(); + let chainstate = &mut self.chain.stacks_node.as_mut().unwrap().chainstate; + let sort_db = self.chain.sortdb.as_mut().unwrap(); NakamotoChainState::get_canonical_block_header(chainstate.db(), sort_db) .unwrap() .unwrap() @@ -593,7 +630,7 @@ impl TestPeer<'_> { G: FnMut(&mut NakamotoBlock) -> bool, { let sender_addr = StacksAddress::p2pkh(false, &StacksPublicKey::from_private(sender_key)); - let mut test_signers = self.config.test_signers.clone().unwrap(); + let mut test_signers = self.config.chain_config.test_signers.clone().unwrap(); let recipient_addr = StacksAddress::from_string("ST2YM3J4KQK09V670TD6ZZ1XYNYCNGCWCVTASN5VM").unwrap(); @@ -643,7 +680,7 @@ impl TestPeer<'_> { let (burn_height, _, consensus_hash) = self.next_burnchain_block(burn_ops); let pox_constants = self.sortdb().pox_constants.clone(); let first_burn_height = self.sortdb().first_block_height; - let mut test_signers = self.config.test_signers.clone().unwrap(); + let mut test_signers = self.config.chain_config.test_signers.clone().unwrap(); info!( "Burnchain block produced: {burn_height}, in_prepare_phase?: {}, first_reward_block?: {}", @@ -656,12 +693,12 @@ impl TestPeer<'_> { tenure_change.burn_view_consensus_hash = consensus_hash.clone(); let nakamoto_tip = - if let Some(nakamoto_parent_tenure) = self.nakamoto_parent_tenure_opt.as_ref() { + if let Some(nakamoto_parent_tenure) = self.chain.nakamoto_parent_tenure_opt.as_ref() { nakamoto_parent_tenure.last().as_ref().unwrap().block_id() } else { let tip = { - let chainstate = &mut self.stacks_node.as_mut().unwrap().chainstate; - let sort_db = self.sortdb.as_mut().unwrap(); + let chainstate = &mut self.chain.stacks_node.as_mut().unwrap().chainstate; + let sort_db = self.chain.sortdb.as_mut().unwrap(); NakamotoChainState::get_canonical_block_header(chainstate.db(), sort_db) .unwrap() .unwrap() @@ -669,16 +706,19 @@ impl TestPeer<'_> { tip.index_block_hash() }; - let miner_addr = self.miner.origin_address().unwrap(); + let miner_addr = self.chain.miner.origin_address().unwrap(); let miner_acct = self.get_account(&nakamoto_tip, &miner_addr.to_account_principal()); let tenure_change_tx = self + .chain .miner .make_nakamoto_tenure_change_with_nonce(tenure_change, miner_acct.nonce); - let coinbase_tx = - self.miner - .make_nakamoto_coinbase_with_nonce(None, vrf_proof, miner_acct.nonce + 1); + let coinbase_tx = self.chain.miner.make_nakamoto_coinbase_with_nonce( + None, + vrf_proof, + miner_acct.nonce + 1, + ); self.make_nakamoto_tenure_and( tenure_change_tx, @@ -746,12 +786,12 @@ impl TestPeer<'_> { tenure_change.burn_view_consensus_hash = consensus_hash.clone(); let nakamoto_tip = - if let Some(nakamoto_parent_tenure) = self.nakamoto_parent_tenure_opt.as_ref() { + if let Some(nakamoto_parent_tenure) = self.chain.nakamoto_parent_tenure_opt.as_ref() { nakamoto_parent_tenure.last().as_ref().unwrap().block_id() } else { let tip = { - let chainstate = &mut self.stacks_node.as_mut().unwrap().chainstate; - let sort_db = self.sortdb.as_mut().unwrap(); + let chainstate = &mut 
self.chain.stacks_node.as_mut().unwrap().chainstate; + let sort_db = self.chain.sortdb.as_mut().unwrap(); NakamotoChainState::get_canonical_block_header(chainstate.db(), sort_db) .unwrap() .unwrap() @@ -759,16 +799,19 @@ impl TestPeer<'_> { tip.index_block_hash() }; - let miner_addr = self.miner.origin_address().unwrap(); + let miner_addr = self.chain.miner.origin_address().unwrap(); let miner_acct = self.get_account(&nakamoto_tip, &miner_addr.to_account_principal()); let tenure_change_tx = self + .chain .miner .make_nakamoto_tenure_change_with_nonce(tenure_change, miner_acct.nonce); - let coinbase_tx = - self.miner - .make_nakamoto_coinbase_with_nonce(None, vrf_proof, miner_acct.nonce + 1); + let coinbase_tx = self.chain.miner.make_nakamoto_coinbase_with_nonce( + None, + vrf_proof, + miner_acct.nonce + 1, + ); let block = self.mine_single_block_tenure_at_tip( &nakamoto_tip, @@ -813,7 +856,7 @@ fn block_descendant() { }) .collect::>(); let test_signers = TestSigners::new(vec![signing_key]); - let mut pox_constants = TestPeerConfig::default().burnchain.pox_constants; + let mut pox_constants = TestChainstateConfig::default().burnchain.pox_constants; pox_constants.reward_cycle_length = 10; pox_constants.v2_unlock_height = 21; pox_constants.pox_3_activation_height = 26; @@ -902,7 +945,7 @@ fn block_info_tests(use_primary_testnet: bool) { }) .collect::>(); let test_signers = TestSigners::new(vec![signing_key]); - let mut pox_constants = TestPeerConfig::default().burnchain.pox_constants; + let mut pox_constants = TestChainstateConfig::default().burnchain.pox_constants; pox_constants.reward_cycle_length = 10; pox_constants.v2_unlock_height = 21; pox_constants.pox_3_activation_height = 26; @@ -1337,7 +1380,7 @@ fn pox_treatment() { }) .collect::>(); let test_signers = TestSigners::new(vec![signing_key]); - let mut pox_constants = TestPeerConfig::default().burnchain.pox_constants; + let mut pox_constants = TestChainstateConfig::default().burnchain.pox_constants; pox_constants.reward_cycle_length = 10; pox_constants.v2_unlock_height = 21; pox_constants.pox_3_activation_height = 26; @@ -1550,8 +1593,8 @@ fn pox_treatment() { blocks.push(block); let tip = { - let chainstate = &mut peer.stacks_node.as_mut().unwrap().chainstate; - let sort_db = peer.sortdb.as_mut().unwrap(); + let chainstate = &mut peer.chain.stacks_node.as_mut().unwrap().chainstate; + let sort_db = peer.chain.sortdb.as_mut().unwrap(); NakamotoChainState::get_canonical_block_header(chainstate.db(), sort_db) .unwrap() .unwrap() @@ -1590,7 +1633,7 @@ fn transactions_indexing() { }) .collect::>(); let test_signers = TestSigners::new(vec![signing_key]); - let mut pox_constants = TestPeerConfig::default().burnchain.pox_constants; + let mut pox_constants = TestChainstateConfig::default().burnchain.pox_constants; pox_constants.reward_cycle_length = 10; pox_constants.v2_unlock_height = 21; pox_constants.pox_3_activation_height = 26; @@ -1614,7 +1657,7 @@ fn transactions_indexing() { let tracked_block_id = tracked_block.block_id(); - let chainstate = &peer.stacks_node.unwrap().chainstate; + let chainstate = &peer.chain.stacks_node.unwrap().chainstate; // compare transactions to what has been tracked for tx in tracked_block.txs { @@ -1655,7 +1698,7 @@ fn transactions_not_indexing() { }) .collect::>(); let test_signers = TestSigners::new(vec![signing_key]); - let mut pox_constants = TestPeerConfig::default().burnchain.pox_constants; + let mut pox_constants = TestChainstateConfig::default().burnchain.pox_constants; pox_constants.reward_cycle_length 
= 10; pox_constants.v2_unlock_height = 21; pox_constants.pox_3_activation_height = 26; @@ -1679,7 +1722,7 @@ fn transactions_not_indexing() { let untracked_block_id = untracked_block.block_id(); - let chainstate = &peer.stacks_node.unwrap().chainstate; + let chainstate = &peer.chain.stacks_node.unwrap().chainstate; // ensure untracked transactions are not recorded for tx in untracked_block.txs { @@ -1721,13 +1764,13 @@ fn test_nakamoto_chainstate_getters() { ); let sort_tip = { - let sort_db = peer.sortdb.as_ref().unwrap(); + let sort_db = peer.chain.sortdb.as_ref().unwrap(); SortitionDB::get_canonical_burn_chain_tip(sort_db.conn()).unwrap() }; { // scope this to drop the chainstate ref and db tx - let chainstate = &peer.stacks_node.as_mut().unwrap().chainstate; - let sort_db = peer.sortdb.as_mut().unwrap(); + let chainstate = &peer.chain.stacks_node.as_mut().unwrap().chainstate; + let sort_db = peer.chain.sortdb.as_mut().unwrap(); let sort_handle = sort_db.index_handle(&sort_tip.sortition_id); // no tenures yet @@ -1753,8 +1796,8 @@ fn test_nakamoto_chainstate_getters() { tenure_change.tenure_consensus_hash = consensus_hash.clone(); tenure_change.burn_view_consensus_hash = consensus_hash.clone(); - let tenure_change_tx = peer.miner.make_nakamoto_tenure_change(tenure_change); - let coinbase_tx = peer.miner.make_nakamoto_coinbase(None, vrf_proof); + let tenure_change_tx = peer.chain.miner.make_nakamoto_tenure_change(tenure_change); + let coinbase_tx = peer.chain.miner.make_nakamoto_coinbase(None, vrf_proof); // do a stx transfer in each block to a given recipient let recipient_addr = @@ -1792,8 +1835,8 @@ fn test_nakamoto_chainstate_getters() { .collect(); let tip = { - let chainstate = &peer.stacks_node.as_mut().unwrap().chainstate; - let sort_db = peer.sortdb.as_mut().unwrap(); + let chainstate = &peer.chain.stacks_node.as_mut().unwrap().chainstate; + let sort_db = peer.chain.sortdb.as_mut().unwrap(); NakamotoChainState::get_canonical_block_header(chainstate.db(), sort_db) .unwrap() .unwrap() @@ -1811,14 +1854,10 @@ fn test_nakamoto_chainstate_getters() { &blocks.last().unwrap().header ); - let sort_tip = { - let sort_db = peer.sortdb.as_ref().unwrap(); - SortitionDB::get_canonical_burn_chain_tip(sort_db.conn()).unwrap() - }; + let sort_tip = SortitionDB::get_canonical_burn_chain_tip(peer.sortdb_ref().conn()).unwrap(); { // scope this to drop the chainstate ref and db tx - let chainstate = &mut peer.stacks_node.as_mut().unwrap().chainstate; - let sort_db = peer.sortdb.as_ref().unwrap(); + let chainstate = &mut peer.chain.stacks_node.as_mut().unwrap().chainstate; for coinbase_height in 0..=((tip .anchored_header @@ -1856,8 +1895,8 @@ fn test_nakamoto_chainstate_getters() { debug!("\n======================================\nBegin tests\n===========================================\n"); { // scope this to drop the chainstate ref and db tx - let chainstate = &peer.stacks_node.as_mut().unwrap().chainstate; - let sort_db = peer.sortdb.as_mut().unwrap(); + let chainstate = &peer.chain.stacks_node.as_mut().unwrap().chainstate; + let sort_db = peer.chain.sortdb.as_mut().unwrap(); let mut sort_tx = sort_db.tx_handle_begin(&sort_tip.sortition_id).unwrap(); // we now have a tenure, and it confirms the last epoch2 block @@ -1999,8 +2038,14 @@ fn test_nakamoto_chainstate_getters() { next_tenure_change.tenure_consensus_hash = next_consensus_hash.clone(); next_tenure_change.burn_view_consensus_hash = next_consensus_hash.clone(); - let next_tenure_change_tx = 
peer.miner.make_nakamoto_tenure_change(next_tenure_change); - let next_coinbase_tx = peer.miner.make_nakamoto_coinbase(None, next_vrf_proof); + let next_tenure_change_tx = peer + .chain + .miner + .make_nakamoto_tenure_change(next_tenure_change); + let next_coinbase_tx = peer + .chain + .miner + .make_nakamoto_coinbase(None, next_vrf_proof); // make the second tenure's blocks let blocks_and_sizes = peer.make_nakamoto_tenure( @@ -2035,13 +2080,13 @@ fn test_nakamoto_chainstate_getters() { .collect(); let sort_tip = { - let sort_db = peer.sortdb.as_ref().unwrap(); + let sort_db = peer.sortdb_ref(); SortitionDB::get_canonical_burn_chain_tip(sort_db.conn()).unwrap() }; { // scope this to drop the chainstate ref and db tx - let chainstate = &peer.stacks_node.as_mut().unwrap().chainstate; - let sort_db = peer.sortdb.as_mut().unwrap(); + let chainstate = &peer.chain.stacks_node.as_mut().unwrap().chainstate; + let sort_db = peer.chain.sortdb.as_mut().unwrap(); let mut sort_tx = sort_db.tx_handle_begin(&sort_tip.sortition_id).unwrap(); @@ -2215,7 +2260,7 @@ pub fn simple_nakamoto_coordinator_10_tenures_10_sortitions<'a>() -> TestPeer<'a let mut consensus_hashes = vec![]; let mut fee_counts = vec![]; let mut total_blocks = 0; - let stx_miner_key = peer.miner.nakamoto_miner_key(); + let stx_miner_key = peer.chain.miner.nakamoto_miner_key(); let stx_miner_addr = StacksAddress::from_public_keys( C32_ADDRESS_VERSION_TESTNET_SINGLESIG, &AddressHashMode::SerializeP2PKH, @@ -2235,9 +2280,10 @@ pub fn simple_nakamoto_coordinator_10_tenures_10_sortitions<'a>() -> TestPeer<'a tenure_change.burn_view_consensus_hash = consensus_hash.clone(); let tenure_change_tx = peer + .chain .miner .make_nakamoto_tenure_change(tenure_change.clone()); - let coinbase_tx = peer.miner.make_nakamoto_coinbase(None, vrf_proof); + let coinbase_tx = peer.chain.miner.make_nakamoto_coinbase(None, vrf_proof); debug!("Next burnchain block: {}", &consensus_hash); @@ -2296,11 +2342,12 @@ pub fn simple_nakamoto_coordinator_10_tenures_10_sortitions<'a>() -> TestPeer<'a // if we're starting a new reward cycle, then save the current one let tip = { - let sort_db = peer.sortdb.as_mut().unwrap(); + let sort_db = peer.chain.sortdb.as_mut().unwrap(); SortitionDB::get_canonical_burn_chain_tip(sort_db.conn()).unwrap() }; if peer .config + .chain_config .burnchain .is_naka_signing_cycle_start(tip.block_height) { @@ -2324,8 +2371,8 @@ pub fn simple_nakamoto_coordinator_10_tenures_10_sortitions<'a>() -> TestPeer<'a // in nakamoto, tx fees are rewarded by the next tenure, so the // scheduled rewards come 1 tenure after the coinbase reward matures let miner = p2pkh_from(&stx_miner_key); - let chainstate = &mut peer.stacks_node.as_mut().unwrap().chainstate; - let sort_db = peer.sortdb.as_mut().unwrap(); + let chainstate = &mut peer.chain.stacks_node.as_mut().unwrap().chainstate; + let sort_db = peer.chain.sortdb.as_mut().unwrap(); // this is sortition height 12, and this miner has earned all 12 of the coinbases // plus the initial per-block mining bonus of 2600 STX, but minus the last three rewards (since @@ -2389,8 +2436,8 @@ pub fn simple_nakamoto_coordinator_10_tenures_10_sortitions<'a>() -> TestPeer<'a } let tip = { - let chainstate = &mut peer.stacks_node.as_mut().unwrap().chainstate; - let sort_db = peer.sortdb.as_mut().unwrap(); + let chainstate = &mut peer.chain.stacks_node.as_mut().unwrap().chainstate; + let sort_db = peer.chain.sortdb.as_mut().unwrap(); NakamotoChainState::get_canonical_block_header(chainstate.db(), sort_db) .unwrap() .unwrap() 
@@ -2411,8 +2458,8 @@ pub fn simple_nakamoto_coordinator_10_tenures_10_sortitions<'a>() -> TestPeer<'a // verify that matured miner records were in place let mut matured_rewards = vec![]; { - let chainstate = &mut peer.stacks_node.as_mut().unwrap().chainstate; - let sort_db = peer.sortdb.as_mut().unwrap(); + let chainstate = &mut peer.chain.stacks_node.as_mut().unwrap().chainstate; + let sort_db = peer.chain.sortdb.as_mut().unwrap(); let (mut chainstate_tx, _) = chainstate.chainstate_tx_begin().unwrap(); for i in 0..24 { let matured_reward_opt = NakamotoChainState::get_matured_miner_reward_schedules( @@ -2502,8 +2549,8 @@ pub fn simple_nakamoto_coordinator_10_tenures_10_sortitions<'a>() -> TestPeer<'a } let tip = { - let chainstate = &mut replay_peer.stacks_node.as_mut().unwrap().chainstate; - let sort_db = replay_peer.sortdb.as_mut().unwrap(); + let chainstate = &mut replay_peer.chain.stacks_node.as_mut().unwrap().chainstate; + let sort_db = replay_peer.chain.sortdb.as_mut().unwrap(); NakamotoChainState::get_canonical_block_header(chainstate.db(), sort_db) .unwrap() .unwrap() @@ -2565,9 +2612,10 @@ pub fn simple_nakamoto_coordinator_2_tenures_3_sortitions<'a>() -> TestPeer<'a> tenure_change.tenure_consensus_hash = consensus_hash.clone(); tenure_change.burn_view_consensus_hash = consensus_hash.clone(); let tenure_change_tx = peer + .chain .miner .make_nakamoto_tenure_change(tenure_change.clone()); - let coinbase_tx = peer.miner.make_nakamoto_coinbase(None, vrf_proof); + let coinbase_tx = peer.chain.miner.make_nakamoto_coinbase(None, vrf_proof); rc_burn_ops.push(burn_ops); @@ -2609,8 +2657,8 @@ pub fn simple_nakamoto_coordinator_2_tenures_3_sortitions<'a>() -> TestPeer<'a> all_blocks.append(&mut blocks.clone()); let tip = { - let chainstate = &mut peer.stacks_node.as_mut().unwrap().chainstate; - let sort_db = peer.sortdb.as_mut().unwrap(); + let chainstate = &mut peer.chain.stacks_node.as_mut().unwrap().chainstate; + let sort_db = peer.chain.sortdb.as_mut().unwrap(); NakamotoChainState::get_canonical_block_header(chainstate.db(), sort_db) .unwrap() .unwrap() @@ -2630,8 +2678,8 @@ pub fn simple_nakamoto_coordinator_2_tenures_3_sortitions<'a>() -> TestPeer<'a> // highest tenure is our tenure-change let (highest_tenure, sort_tip) = { - let chainstate = &mut peer.stacks_node.as_mut().unwrap().chainstate; - let sort_db = peer.sortdb.as_mut().unwrap(); + let chainstate = &mut peer.chain.stacks_node.as_mut().unwrap().chainstate; + let sort_db = peer.chain.sortdb.as_mut().unwrap(); let tip = SortitionDB::get_canonical_burn_chain_tip(sort_db.conn()).unwrap(); let tenure = NakamotoChainState::get_ongoing_tenure( &mut chainstate.index_conn(), @@ -2668,7 +2716,10 @@ pub fn simple_nakamoto_coordinator_2_tenures_3_sortitions<'a>() -> TestPeer<'a> blocks.last().cloned().unwrap().header.block_id(), blocks.len() as u32, ); - let tenure_change_tx = peer.miner.make_nakamoto_tenure_change(tenure_change_extend); + let tenure_change_tx = peer + .chain + .miner + .make_nakamoto_tenure_change(tenure_change_extend); let blocks_and_sizes = peer.make_nakamoto_tenure_extension( tenure_change_tx, @@ -2703,8 +2754,8 @@ pub fn simple_nakamoto_coordinator_2_tenures_3_sortitions<'a>() -> TestPeer<'a> all_blocks.append(&mut blocks.clone()); let tip = { - let chainstate = &mut peer.stacks_node.as_mut().unwrap().chainstate; - let sort_db = peer.sortdb.as_mut().unwrap(); + let chainstate = &mut peer.chain.stacks_node.as_mut().unwrap().chainstate; + let sort_db = peer.chain.sortdb.as_mut().unwrap(); 
NakamotoChainState::get_canonical_block_header(chainstate.db(), sort_db) .unwrap() .unwrap() @@ -2725,8 +2776,8 @@ pub fn simple_nakamoto_coordinator_2_tenures_3_sortitions<'a>() -> TestPeer<'a> // highest tenure is our tenure-extend let (highest_tenure, sort_tip) = { - let chainstate = &mut peer.stacks_node.as_mut().unwrap().chainstate; - let sort_db = peer.sortdb.as_mut().unwrap(); + let chainstate = &mut peer.chain.stacks_node.as_mut().unwrap().chainstate; + let sort_db = peer.chain.sortdb.as_mut().unwrap(); let tip = SortitionDB::get_canonical_burn_chain_tip(sort_db.conn()).unwrap(); let tenure = NakamotoChainState::get_ongoing_tenure( &mut chainstate.index_conn(), @@ -2759,8 +2810,8 @@ pub fn simple_nakamoto_coordinator_2_tenures_3_sortitions<'a>() -> TestPeer<'a> tenure_change.tenure_consensus_hash = consensus_hash.clone(); tenure_change.burn_view_consensus_hash = consensus_hash.clone(); - let tenure_change_tx = peer.miner.make_nakamoto_tenure_change(tenure_change); - let coinbase_tx = peer.miner.make_nakamoto_coinbase(None, vrf_proof); + let tenure_change_tx = peer.chain.miner.make_nakamoto_tenure_change(tenure_change); + let coinbase_tx = peer.chain.miner.make_nakamoto_coinbase(None, vrf_proof); rc_burn_ops.push(burn_ops); @@ -2802,8 +2853,8 @@ pub fn simple_nakamoto_coordinator_2_tenures_3_sortitions<'a>() -> TestPeer<'a> all_blocks.append(&mut blocks.clone()); let tip = { - let chainstate = &mut peer.stacks_node.as_mut().unwrap().chainstate; - let sort_db = peer.sortdb.as_mut().unwrap(); + let chainstate = &mut peer.chain.stacks_node.as_mut().unwrap().chainstate; + let sort_db = peer.chain.sortdb.as_mut().unwrap(); NakamotoChainState::get_canonical_block_header(chainstate.db(), sort_db) .unwrap() .unwrap() @@ -2823,8 +2874,8 @@ pub fn simple_nakamoto_coordinator_2_tenures_3_sortitions<'a>() -> TestPeer<'a> // highest tenure is our new tenure-change let (highest_tenure, sort_tip) = { - let chainstate = &mut peer.stacks_node.as_mut().unwrap().chainstate; - let sort_db = peer.sortdb.as_mut().unwrap(); + let chainstate = &mut peer.chain.stacks_node.as_mut().unwrap().chainstate; + let sort_db = peer.chain.sortdb.as_mut().unwrap(); let tip = SortitionDB::get_canonical_burn_chain_tip(sort_db.conn()).unwrap(); let tenure = NakamotoChainState::get_ongoing_tenure( &mut chainstate.index_conn(), @@ -2854,8 +2905,8 @@ pub fn simple_nakamoto_coordinator_2_tenures_3_sortitions<'a>() -> TestPeer<'a> replay_reward_cycle(&mut replay_peer, &rc_burn_ops, &all_blocks); let tip = { - let chainstate = &mut replay_peer.stacks_node.as_mut().unwrap().chainstate; - let sort_db = replay_peer.sortdb.as_mut().unwrap(); + let chainstate = &mut replay_peer.chain.stacks_node.as_mut().unwrap().chainstate; + let sort_db = replay_peer.chain.sortdb.as_mut().unwrap(); NakamotoChainState::get_canonical_block_header(chainstate.db(), sort_db) .unwrap() .unwrap() @@ -2912,7 +2963,7 @@ pub fn simple_nakamoto_coordinator_10_extended_tenures_10_sortitions() -> TestPe let mut rc_burn_ops = vec![]; let mut consensus_hashes = vec![]; let mut fee_counts = vec![]; - let stx_miner_key = peer.miner.nakamoto_miner_key(); + let stx_miner_key = peer.chain.miner.nakamoto_miner_key(); for i in 0..10 { let (burn_ops, mut tenure_change, miner_key) = @@ -2924,9 +2975,10 @@ pub fn simple_nakamoto_coordinator_10_extended_tenures_10_sortitions() -> TestPe tenure_change.burn_view_consensus_hash = consensus_hash.clone(); let tenure_change_tx = peer + .chain .miner .make_nakamoto_tenure_change(tenure_change.clone()); - let coinbase_tx = 
peer.miner.make_nakamoto_coinbase(None, vrf_proof); + let coinbase_tx = peer.chain.miner.make_nakamoto_coinbase(None, vrf_proof); debug!("Next burnchain block: {}", &consensus_hash); @@ -3002,8 +3054,8 @@ pub fn simple_nakamoto_coordinator_10_extended_tenures_10_sortitions() -> TestPe // check that our tenure-extends have been getting applied let (highest_tenure, sort_tip) = { - let chainstate = &mut peer.stacks_node.as_mut().unwrap().chainstate; - let sort_db = peer.sortdb.as_mut().unwrap(); + let chainstate = &mut peer.chain.stacks_node.as_mut().unwrap().chainstate; + let sort_db = peer.chain.sortdb.as_mut().unwrap(); let tip = SortitionDB::get_canonical_burn_chain_tip(sort_db.conn()).unwrap(); let tenure = NakamotoChainState::get_ongoing_tenure( &mut chainstate.index_conn(), @@ -3037,11 +3089,12 @@ pub fn simple_nakamoto_coordinator_10_extended_tenures_10_sortitions() -> TestPe // if we're starting a new reward cycle, then save the current one let tip = { - let sort_db = peer.sortdb.as_mut().unwrap(); + let sort_db = peer.chain.sortdb.as_mut().unwrap(); SortitionDB::get_canonical_burn_chain_tip(sort_db.conn()).unwrap() }; if peer .config + .chain_config .burnchain .is_naka_signing_cycle_start(tip.block_height) { @@ -3062,8 +3115,8 @@ pub fn simple_nakamoto_coordinator_10_extended_tenures_10_sortitions() -> TestPe // in nakamoto, tx fees are rewarded by the next tenure, so the // scheduled rewards come 1 tenure after the coinbase reward matures let miner = p2pkh_from(&stx_miner_key); - let chainstate = &mut peer.stacks_node.as_mut().unwrap().chainstate; - let sort_db = peer.sortdb.as_mut().unwrap(); + let chainstate = &mut peer.chain.stacks_node.as_mut().unwrap().chainstate; + let sort_db = peer.chain.sortdb.as_mut().unwrap(); // this is sortition height 12, and this miner has earned all 12 of the coinbases // plus the initial per-block mining bonus of 2600 STX, but minus the last three rewards (since @@ -3111,13 +3164,9 @@ pub fn simple_nakamoto_coordinator_10_extended_tenures_10_sortitions() -> TestPe } eprintln!( - "Checking block #{} ({},{}): {} =?= {} + {}", - i, - &ch, + "Checking block #{i} ({ch},{}): {} =?= {expected_total_coinbase} + {expected_total_tx_fees}", &sn.block_height, - stx_balance.amount_unlocked(), - expected_total_coinbase, - expected_total_tx_fees + stx_balance.amount_unlocked() ); assert_eq!( stx_balance.amount_unlocked(), @@ -3126,8 +3175,8 @@ pub fn simple_nakamoto_coordinator_10_extended_tenures_10_sortitions() -> TestPe } let tip = { - let chainstate = &mut peer.stacks_node.as_mut().unwrap().chainstate; - let sort_db = peer.sortdb.as_mut().unwrap(); + let chainstate = &mut peer.chain.stacks_node.as_mut().unwrap().chainstate; + let sort_db = peer.chain.sortdb.as_mut().unwrap(); NakamotoChainState::get_canonical_block_header(chainstate.db(), sort_db) .unwrap() .unwrap() @@ -3153,8 +3202,8 @@ pub fn simple_nakamoto_coordinator_10_extended_tenures_10_sortitions() -> TestPe } let tip = { - let chainstate = &mut replay_peer.stacks_node.as_mut().unwrap().chainstate; - let sort_db = replay_peer.sortdb.as_mut().unwrap(); + let chainstate = &mut replay_peer.chain.stacks_node.as_mut().unwrap().chainstate; + let sort_db = replay_peer.chain.sortdb.as_mut().unwrap(); NakamotoChainState::get_canonical_block_header(chainstate.db(), sort_db) .unwrap() .unwrap() @@ -3208,7 +3257,7 @@ fn process_next_nakamoto_block_deadlock() { }) .collect::>(); let test_signers = TestSigners::new(vec![signing_key]); - let mut pox_constants = TestPeerConfig::default().burnchain.pox_constants; + 
let mut pox_constants = TestChainstateConfig::default().burnchain.pox_constants; pox_constants.reward_cycle_length = 10; pox_constants.v2_unlock_height = 21; pox_constants.pox_3_activation_height = 26; @@ -3226,6 +3275,7 @@ fn process_next_nakamoto_block_deadlock() { let mut peer = boot_plan.boot_into_nakamoto_peer(vec![], None); let mut sortition_db = peer.sortdb().reopen().unwrap(); let (chainstate, _) = &mut peer + .chain .stacks_node .as_mut() .unwrap() @@ -3314,13 +3364,14 @@ fn test_stacks_on_burnchain_ops() { ); let mut all_blocks: Vec = vec![]; - let stx_miner_key = peer.miner.nakamoto_miner_key(); + let stx_miner_key = peer.chain.miner.nakamoto_miner_key(); let mut extra_burn_ops = vec![]; let mut bitpatterns = HashMap::new(); // map consensus hash to txid bit pattern let cur_reward_cycle = peer .config + .chain_config .burnchain .block_height_to_reward_cycle(peer.get_burn_block_height()) .unwrap(); @@ -3417,9 +3468,10 @@ fn test_stacks_on_burnchain_ops() { tenure_change.burn_view_consensus_hash = consensus_hash.clone(); let tenure_change_tx = peer + .chain .miner .make_nakamoto_tenure_change(tenure_change.clone()); - let coinbase_tx = peer.miner.make_nakamoto_coinbase(None, vrf_proof); + let coinbase_tx = peer.chain.miner.make_nakamoto_coinbase(None, vrf_proof); debug!("Next burnchain block: {}", &consensus_hash); @@ -3517,8 +3569,8 @@ fn test_stacks_on_burnchain_ops() { // check that our tenure-extends have been getting applied let (highest_tenure, sort_tip) = { - let chainstate = &mut peer.stacks_node.as_mut().unwrap().chainstate; - let sort_db = peer.sortdb.as_mut().unwrap(); + let chainstate = &mut peer.chain.stacks_node.as_mut().unwrap().chainstate; + let sort_db = peer.chain.sortdb.as_mut().unwrap(); let tip = SortitionDB::get_canonical_burn_chain_tip(sort_db.conn()).unwrap(); let tenure = NakamotoChainState::get_ongoing_tenure( &mut chainstate.index_conn(), @@ -3608,8 +3660,8 @@ fn test_stacks_on_burnchain_ops() { } let tip = { - let chainstate = &mut peer.stacks_node.as_mut().unwrap().chainstate; - let sort_db = peer.sortdb.as_mut().unwrap(); + let chainstate = &mut peer.chain.stacks_node.as_mut().unwrap().chainstate; + let sort_db = peer.chain.sortdb.as_mut().unwrap(); NakamotoChainState::get_canonical_block_header(chainstate.db(), sort_db) .unwrap() .unwrap() diff --git a/stackslib/src/chainstate/nakamoto/tests/mod.rs b/stackslib/src/chainstate/nakamoto/tests/mod.rs index 4c285874d1b..40ca8eb1b34 100644 --- a/stackslib/src/chainstate/nakamoto/tests/mod.rs +++ b/stackslib/src/chainstate/nakamoto/tests/mod.rs @@ -1991,7 +1991,7 @@ fn test_make_miners_stackerdb_config() { None, ); - let naka_miner_hash160 = peer.miner.nakamoto_miner_hash160(); + let naka_miner_hash160 = peer.chain.miner.nakamoto_miner_hash160(); let miner_keys: Vec<_> = (0..10).map(|_| StacksPrivateKey::random()).collect(); let miner_hash160s: Vec<_> = miner_keys .iter() @@ -2009,8 +2009,8 @@ fn test_make_miners_stackerdb_config() { debug!("miners = {:#?}", &miner_hash160s); // extract chainstate, sortdb, and stackerdbs -- we don't need the peer anymore - let chainstate = &mut peer.stacks_node.as_mut().unwrap().chainstate; - let sort_db = peer.sortdb.as_mut().unwrap(); + let chainstate = &mut peer.chain.stacks_node.as_mut().unwrap().chainstate; + let sort_db = peer.chain.sortdb.as_mut().unwrap(); let mut last_snapshot = SortitionDB::get_canonical_burn_chain_tip(sort_db.conn()).unwrap(); let stackerdbs = peer.network.stackerdbs; let miners_contract_id = boot_code_id(MINERS_NAME, false); diff --git 
a/stackslib/src/chainstate/nakamoto/tests/node.rs b/stackslib/src/chainstate/nakamoto/tests/node.rs index 2b657ccf27c..e7a6135d180 100644 --- a/stackslib/src/chainstate/nakamoto/tests/node.rs +++ b/stackslib/src/chainstate/nakamoto/tests/node.rs @@ -34,15 +34,12 @@ use crate::chainstate::burn::operations::{ use crate::chainstate::burn::*; use crate::chainstate::coordinator::tests::NullEventDispatcher; use crate::chainstate::coordinator::{ChainsCoordinator, OnChainRewardSetProvider}; -use crate::chainstate::nakamoto::coordinator::{ - get_nakamoto_next_recipients, load_nakamoto_reward_set, -}; +use crate::chainstate::nakamoto::coordinator::load_nakamoto_reward_set; use crate::chainstate::nakamoto::miner::NakamotoBlockBuilder; use crate::chainstate::nakamoto::staging_blocks::{ NakamotoBlockObtainMethod, NakamotoStagingBlocksConnRef, }; use crate::chainstate::nakamoto::test_signers::TestSigners; -use crate::chainstate::nakamoto::tests::get_account; use crate::chainstate::nakamoto::{ NakamotoBlock, NakamotoBlockHeader, NakamotoChainState, StacksDBIndexed, }; @@ -696,6 +693,7 @@ impl TestStacksNode { mut after_block: G, malleablize: bool, mined_canonical: bool, + timestamp: Option, ) -> Result)>, ChainstateError> where S: FnMut(&mut NakamotoBlockBuilder), @@ -804,6 +802,10 @@ impl TestStacksNode { &coinbase.clone().unwrap(), ) }; + // Optionally overwrite the timestamp to enable predictable blocks. + if let Some(timestamp) = timestamp { + builder.header.timestamp = timestamp; + } miner_setup(&mut builder); tenure_change = None; @@ -1060,82 +1062,82 @@ impl TestStacksNode { } } -impl TestPeer<'_> { - /// Get the Nakamoto parent linkage data for building atop the last-produced tenure or - /// Stacks 2.x block. - /// Returns (last-tenure-id, epoch2-parent, nakamoto-parent-tenure, parent-sortition) - fn get_nakamoto_parent( - miner: &TestMiner, - stacks_node: &TestStacksNode, - sortdb: &SortitionDB, - ) -> ( - StacksBlockId, - Option, - Option>, - ) { - let tip = SortitionDB::get_canonical_burn_chain_tip(sortdb.conn()).unwrap(); - if let Some(parent_blocks) = stacks_node.get_last_nakamoto_tenure(miner) { - debug!("Parent will be a Nakamoto block"); - - // parent is an epoch 3 nakamoto block - let first_parent = parent_blocks.first().unwrap(); - debug!("First parent is {:?}", first_parent); +/// Get the Nakamoto parent linkage data for building atop the last-produced tenure or +/// Stacks 2.x block. 
+/// Returns (last-tenure-id, epoch2-parent, nakamoto-parent-tenure, parent-sortition) +pub fn get_nakamoto_parent( + miner: &TestMiner, + stacks_node: &TestStacksNode, + sortdb: &SortitionDB, +) -> ( + StacksBlockId, + Option, + Option>, +) { + let tip = SortitionDB::get_canonical_burn_chain_tip(sortdb.conn()).unwrap(); + if let Some(parent_blocks) = stacks_node.get_last_nakamoto_tenure(miner) { + debug!("Parent will be a Nakamoto block"); + + // parent is an epoch 3 nakamoto block + let first_parent = parent_blocks.first().unwrap(); + debug!("First parent is {:?}", first_parent); + + // sanity check -- this parent must correspond to a sortition + assert!( + SortitionDB::get_block_snapshot_consensus( + sortdb.conn(), + &first_parent.header.consensus_hash, + ) + .unwrap() + .unwrap() + .sortition + ); - // sanity check -- this parent must correspond to a sortition - assert!( - SortitionDB::get_block_snapshot_consensus( - sortdb.conn(), - &first_parent.header.consensus_hash, - ) - .unwrap() - .unwrap() - .sortition + let last_tenure_id = StacksBlockId::new( + &first_parent.header.consensus_hash, + &first_parent.header.block_hash(), + ); + (last_tenure_id, None, Some(parent_blocks)) + } else { + // parent may be an epoch 2.x block + let (parent_opt, parent_sortition_opt) = if let Some(parent_block) = + stacks_node.get_last_anchored_block(miner) + { + debug!("Parent will be a Stacks 2.x block"); + let ic = sortdb.index_conn(); + let sort_opt = SortitionDB::get_block_snapshot_for_winning_stacks_block( + &ic, + &tip.sortition_id, + &parent_block.block_hash(), + ) + .unwrap(); + if sort_opt.is_none() { + warn!("No parent sortition in epoch2: tip.sortition_id = {}, parent_block.block_hash() = {}", &tip.sortition_id, &parent_block.block_hash()); + } + (Some(parent_block), sort_opt) + } else { + warn!( + "No parent sortition in epoch2: tip.sortition_id = {}", + &tip.sortition_id ); + (None, None) + }; - let last_tenure_id = StacksBlockId::new( - &first_parent.header.consensus_hash, - &first_parent.header.block_hash(), - ); - (last_tenure_id, None, Some(parent_blocks)) + let last_tenure_id = if let Some(last_epoch2_block) = parent_opt.as_ref() { + let parent_sort = parent_sortition_opt.as_ref().unwrap(); + StacksBlockId::new( + &parent_sort.consensus_hash, + &last_epoch2_block.header.block_hash(), + ) } else { - // parent may be an epoch 2.x block - let (parent_opt, parent_sortition_opt) = if let Some(parent_block) = - stacks_node.get_last_anchored_block(miner) - { - debug!("Parent will be a Stacks 2.x block"); - let ic = sortdb.index_conn(); - let sort_opt = SortitionDB::get_block_snapshot_for_winning_stacks_block( - &ic, - &tip.sortition_id, - &parent_block.block_hash(), - ) - .unwrap(); - if sort_opt.is_none() { - warn!("No parent sortition in epoch2: tip.sortition_id = {}, parent_block.block_hash() = {}", &tip.sortition_id, &parent_block.block_hash()); - } - (Some(parent_block), sort_opt) - } else { - warn!( - "No parent sortition in epoch2: tip.sortition_id = {}", - &tip.sortition_id - ); - (None, None) - }; - - let last_tenure_id = if let Some(last_epoch2_block) = parent_opt.as_ref() { - let parent_sort = parent_sortition_opt.as_ref().unwrap(); - StacksBlockId::new( - &parent_sort.consensus_hash, - &last_epoch2_block.header.block_hash(), - ) - } else { - // must be a genesis block (testing only!) - StacksBlockId(BOOT_BLOCK_HASH.0) - }; - (last_tenure_id, parent_opt, None) - } + // must be a genesis block (testing only!) 
+ StacksBlockId(BOOT_BLOCK_HASH.0) + }; + (last_tenure_id, parent_opt, None) } +} +impl TestPeer<'_> { /// Start the next Nakamoto tenure. /// This generates the VRF key and block-commit txs, as well as the TenureChange and /// leader key this commit references @@ -1147,218 +1149,23 @@ impl TestPeer<'_> { TenureChangePayload, LeaderKeyRegisterOp, ) { - let mut sortdb = self.sortdb.take().unwrap(); - let tip = SortitionDB::get_canonical_burn_chain_tip(sortdb.conn()).unwrap(); - - let mut burn_block = TestBurnchainBlock::new(&tip, 0); - let mut stacks_node = self.stacks_node.take().unwrap(); - - let (last_tenure_id, parent_block_opt, parent_tenure_opt) = - if let Some(nakamoto_parent_tenure) = self.nakamoto_parent_tenure_opt.as_ref() { - ( - nakamoto_parent_tenure.first().as_ref().unwrap().block_id(), - None, - Some(nakamoto_parent_tenure.clone()), - ) - } else { - Self::get_nakamoto_parent(&self.miner, &stacks_node, &sortdb) - }; - - // find the VRF leader key register tx to use. - // it's the one pointed to by the parent tenure - let parent_consensus_hash_and_tenure_start_id_opt = - if let Some(parent_tenure) = parent_tenure_opt.as_ref() { - let tenure_start_block = parent_tenure.first().unwrap(); - Some(( - tenure_start_block.header.consensus_hash.clone(), - tenure_start_block.block_id(), - )) - } else if let Some(parent_block) = parent_block_opt.as_ref() { - let parent_header_info = - StacksChainState::get_stacks_block_header_info_by_index_block_hash( - stacks_node.chainstate.db(), - &last_tenure_id, - ) - .unwrap() - .unwrap(); - Some(( - parent_header_info.consensus_hash.clone(), - parent_header_info.index_block_hash(), - )) - } else { - None - }; - - let last_key = if let Some((ch, parent_tenure_start_block_id)) = - parent_consensus_hash_and_tenure_start_id_opt.clone() - { - // it's possible that the parent was a shadow block. 
- // if so, find the highest non-shadow ancestor's block-commit, so we can - let mut cursor = ch; - let (tenure_sn, tenure_block_commit) = loop { - let tenure_sn = SortitionDB::get_block_snapshot_consensus(sortdb.conn(), &cursor) - .unwrap() - .unwrap(); - - let Some(tenure_block_commit) = get_block_commit_by_txid( - sortdb.conn(), - &tenure_sn.sortition_id, - &tenure_sn.winning_block_txid, - ) - .unwrap() else { - // parent must be a shadow block - let header = NakamotoChainState::get_block_header_nakamoto( - stacks_node.chainstate.db(), - &parent_tenure_start_block_id, - ) - .unwrap() - .unwrap() - .anchored_header - .as_stacks_nakamoto() - .cloned() - .unwrap(); - - if !header.is_shadow_block() { - panic!("Parent tenure start block ID {} has no block-commit and is not a shadow block", &parent_tenure_start_block_id); - } - - cursor = stacks_node - .chainstate - .index_conn() - .get_parent_tenure_consensus_hash(&parent_tenure_start_block_id, &cursor) - .unwrap() - .unwrap(); - - continue; - }; - break (tenure_sn, tenure_block_commit); - }; - - let tenure_leader_key = SortitionDB::get_leader_key_at( - &sortdb.index_conn(), - tenure_block_commit.key_block_ptr.into(), - tenure_block_commit.key_vtxindex.into(), - &tenure_sn.sortition_id, - ) - .unwrap() - .unwrap(); - tenure_leader_key - } else { - panic!("No leader key"); - }; - - let network_id = self.config.network_id; - let chainstate_path = self.chainstate_path.clone(); - let burn_block_height = burn_block.block_height; - - let (mut block_commit_op, tenure_change_payload) = stacks_node.begin_nakamoto_tenure( - &sortdb, - &mut self.miner, - &mut burn_block, - &last_key, - parent_block_opt.as_ref(), - parent_tenure_opt.as_ref().map(|blocks| blocks.as_slice()), - 1000, - tenure_change_cause, - ); - - // patch up block-commit -- these blocks all mine off of genesis - if last_tenure_id == StacksBlockId(BOOT_BLOCK_HASH.0) { - block_commit_op.parent_block_ptr = 0; - block_commit_op.parent_vtxindex = 0; - } - - let mut burn_ops = vec![]; - if self.miner.last_VRF_public_key().is_none() { - let leader_key_op = stacks_node.add_key_register(&mut burn_block, &mut self.miner); - burn_ops.push(BlockstackOperationType::LeaderKeyRegister(leader_key_op)); - } - - // patch in reward set info - match get_nakamoto_next_recipients( - &tip, - &mut sortdb, - &mut stacks_node.chainstate, - &tenure_change_payload.previous_tenure_end, - &self.config.burnchain, - ) { - Ok(recipients) => { - block_commit_op.commit_outs = match recipients { - Some(info) => { - let mut recipients = info - .recipients - .into_iter() - .map(|x| x.0) - .collect::>(); - if recipients.len() == 1 { - recipients.push(PoxAddress::standard_burn_address(false)); - } - recipients - } - None => { - if self - .config - .burnchain - .is_in_prepare_phase(burn_block.block_height) - { - vec![PoxAddress::standard_burn_address(false)] - } else { - vec![ - PoxAddress::standard_burn_address(false), - PoxAddress::standard_burn_address(false), - ] - } - } - }; - test_debug!( - "Block commit at height {} has {} recipients: {:?}", - block_commit_op.block_height, - block_commit_op.commit_outs.len(), - &block_commit_op.commit_outs - ); - } - Err(e) => { - panic!("Failure fetching recipient set: {e:?}"); - } - }; - - burn_ops.push(BlockstackOperationType::LeaderBlockCommit(block_commit_op)); - - // prepare to mine - let miner_addr = self.miner.origin_address().unwrap(); - let miner_account = get_account(&mut stacks_node.chainstate, &sortdb, &miner_addr); - self.miner.set_nonce(miner_account.nonce); - - 
self.stacks_node = Some(stacks_node); - self.sortdb = Some(sortdb); - (burn_ops, tenure_change_payload, last_key) + self.chain.begin_nakamoto_tenure(tenure_change_cause) } /// Make the VRF proof for this tenure. /// Call after processing the block-commit pub fn make_nakamoto_vrf_proof(&mut self, miner_key: LeaderKeyRegisterOp) -> VRFProof { - let sortdb = self.sortdb.take().unwrap(); - let tip = SortitionDB::get_canonical_burn_chain_tip(sortdb.conn()).unwrap(); - let proof = self - .miner - .make_proof(&miner_key.public_key, &tip.sortition_hash) - .unwrap_or_else(|| panic!("FATAL: no private key for {:?}", miner_key.public_key)); - self.sortdb = Some(sortdb); - debug!( - "VRF proof made from {:?} over {}: {proof:?}", - miner_key.public_key, &tip.sortition_hash - ); - proof + self.chain.make_nakamoto_vrf_proof(miner_key) } pub fn try_process_block(&mut self, block: &NakamotoBlock) -> Result { - let mut sort_handle = self.sortdb.as_ref().unwrap().index_handle_at_tip(); + let mut sort_handle = self.chain.sortdb.as_ref().unwrap().index_handle_at_tip(); let stacks_tip = sort_handle.get_nakamoto_tip_block_id().unwrap().unwrap(); let accepted = Relayer::process_new_nakamoto_block( - &self.config.burnchain, - self.sortdb.as_ref().unwrap(), + &self.config.chain_config.burnchain, + self.chain.sortdb.as_ref().unwrap(), &mut sort_handle, - &mut self.stacks_node.as_mut().unwrap().chainstate, + &mut self.chain.stacks_node.as_mut().unwrap().chainstate, &stacks_tip, block, None, @@ -1370,11 +1177,11 @@ impl TestPeer<'_> { let sort_tip = SortitionDB::get_canonical_sortition_tip(self.sortdb().conn()).unwrap(); let Some(block_receipt) = NakamotoChainState::process_next_nakamoto_block::( - &mut self.stacks_node.as_mut().unwrap().chainstate, - self.sortdb.as_mut().unwrap(), + &mut self.chain.stacks_node.as_mut().unwrap().chainstate, + self.chain.sortdb.as_mut().unwrap(), &sort_tip, None, - self.config.txindex, + self.config.chain_config.txindex, )? 
else { return Ok(false); @@ -1449,7 +1256,7 @@ impl TestPeer<'_> { let blocks = TestStacksNode::make_nakamoto_tenure_blocks( &mut stacks_node.chainstate, sortdb, - &mut peer.miner, + &mut peer.chain.miner, signers, &tenure_change .try_as_tenure_change() @@ -1458,12 +1265,13 @@ impl TestPeer<'_> { .clone(), Some(tenure_change), Some(coinbase), - &mut peer.coord, + &mut peer.chain.coord, miner_setup, block_builder, after_block, - peer.mine_malleablized_blocks, - peer.nakamoto_parent_tenure_opt.is_none(), + peer.chain.mine_malleablized_blocks, + peer.chain.nakamoto_parent_tenure_opt.is_none(), + None, )?; let just_blocks = blocks @@ -1480,7 +1288,9 @@ impl TestPeer<'_> { .flat_map(|(_, _, _, malleablized)| malleablized) .collect(); - peer.malleablized_blocks.append(&mut malleablized_blocks); + peer.chain + .malleablized_blocks + .append(&mut malleablized_blocks); let block_data = blocks .into_iter() @@ -1510,8 +1320,8 @@ impl TestPeer<'_> { &[(NakamotoBlock, u64, ExecutionCost)], ) -> Vec, { - let mut stacks_node = self.stacks_node.take().unwrap(); - let mut sortdb = self.sortdb.take().unwrap(); + let mut stacks_node = self.chain.stacks_node.take().unwrap(); + let mut sortdb = self.chain.sortdb.take().unwrap(); let tenure_extend_payload = if let TransactionPayload::TenureChange(ref tc) = &tenure_extend_tx.payload { @@ -1537,7 +1347,7 @@ impl TestPeer<'_> { let blocks = TestStacksNode::make_nakamoto_tenure_blocks( &mut stacks_node.chainstate, &mut sortdb, - &mut self.miner, + &mut self.chain.miner, signers, &tenure_extend_tx .try_as_tenure_change() @@ -1546,12 +1356,13 @@ impl TestPeer<'_> { .clone(), Some(tenure_extend_tx), None, - &mut self.coord, + &mut self.chain.coord, |_| {}, block_builder, |_| true, - self.mine_malleablized_blocks, - self.nakamoto_parent_tenure_opt.is_none(), + self.chain.mine_malleablized_blocks, + self.chain.nakamoto_parent_tenure_opt.is_none(), + None, ) .unwrap(); @@ -1569,15 +1380,17 @@ impl TestPeer<'_> { .flat_map(|(_, _, _, malleablized)| malleablized) .collect(); - self.malleablized_blocks.append(&mut malleablized_blocks); + self.chain + .malleablized_blocks + .append(&mut malleablized_blocks); let block_data = blocks .into_iter() .map(|(blk, sz, cost, _)| (blk, sz, cost)) .collect(); - self.stacks_node = Some(stacks_node); - self.sortdb = Some(sortdb); + self.chain.stacks_node = Some(stacks_node); + self.chain.sortdb = Some(sortdb); block_data } @@ -1586,8 +1399,8 @@ impl TestPeer<'_> { pub fn process_nakamoto_tenure(&mut self, blocks: Vec) { debug!("Peer will process {} Nakamoto blocks", blocks.len()); - let mut sortdb = self.sortdb.take().unwrap(); - let mut node = self.stacks_node.take().unwrap(); + let mut sortdb = self.chain.sortdb.take().unwrap(); + let mut node = self.chain.stacks_node.take().unwrap(); let tip = SortitionDB::get_canonical_sortition_tip(sortdb.conn()).unwrap(); @@ -1609,7 +1422,7 @@ impl TestPeer<'_> { .unwrap(); if accepted.is_accepted() { test_debug!("Accepted Nakamoto block {}", &block_id); - self.coord.handle_new_nakamoto_stacks_block().unwrap(); + self.chain.coord.handle_new_nakamoto_stacks_block().unwrap(); debug!("Begin check Nakamoto block {}", &block.block_id()); TestPeer::check_processed_nakamoto_block(&mut sortdb, &mut node.chainstate, block); @@ -1619,8 +1432,8 @@ impl TestPeer<'_> { } } - self.sortdb = Some(sortdb); - self.stacks_node = Some(node); + self.chain.sortdb = Some(sortdb); + self.chain.stacks_node = Some(node); } /// Get the tenure-start block of the parent tenure of `tenure_id_consensus_hash` @@ -1757,7 +1570,7 
@@ impl TestPeer<'_> { let Ok(Some(parent_block_header)) = NakamotoChainState::get_block_header(chainstate.db(), &block.header.parent_block_id) else { - panic!("No parent block for {:?}", &block); + panic!("No parent block for {block:?}"); }; // get_coinbase_height @@ -2472,8 +2285,8 @@ impl TestPeer<'_> { &naka_tip_id ); - let mut stacks_node = self.stacks_node.take().unwrap(); - let sortdb = self.sortdb.take().unwrap(); + let mut stacks_node = self.chain.stacks_node.take().unwrap(); + let sortdb = self.chain.sortdb.take().unwrap(); let shadow_block = NakamotoBlockBuilder::make_shadow_tenure( &mut stacks_node.chainstate, @@ -2487,12 +2300,13 @@ impl TestPeer<'_> { // Get the reward set let sort_tip_sn = SortitionDB::get_canonical_burn_chain_tip(sortdb.conn()).unwrap(); let reward_set = load_nakamoto_reward_set( - self.miner + self.chain + .miner .burnchain .block_height_to_reward_cycle(sort_tip_sn.block_height) .expect("FATAL: no reward cycle for sortition"), &sort_tip_sn.sortition_id, - &self.miner.burnchain, + &self.chain.miner.burnchain, &mut stacks_node.chainstate, &shadow_block.header.parent_block_id, &sortdb, @@ -2554,11 +2368,11 @@ impl TestPeer<'_> { drop(rollback_tx); - self.stacks_node = Some(stacks_node); - self.sortdb = Some(sortdb); + self.chain.stacks_node = Some(stacks_node); + self.chain.sortdb = Some(sortdb); // process it - self.coord.handle_new_nakamoto_stacks_block().unwrap(); + self.chain.coord.handle_new_nakamoto_stacks_block().unwrap(); // verify that it processed self.refresh_burnchain_view(); diff --git a/stackslib/src/chainstate/stacks/boot/mod.rs b/stackslib/src/chainstate/stacks/boot/mod.rs index 4ef8b2a46f8..7464c49052e 100644 --- a/stackslib/src/chainstate/stacks/boot/mod.rs +++ b/stackslib/src/chainstate/stacks/boot/mod.rs @@ -1655,8 +1655,8 @@ pub mod test { observer: Option<&'a TestEventObserver>, ) -> (TestPeer<'a>, Vec) { let mut peer_config = TestPeerConfig::new(test_name, 0, 0); - peer_config.burnchain = burnchain.clone(); - peer_config.epochs = epochs; + peer_config.chain_config.burnchain = burnchain.clone(); + peer_config.chain_config.epochs = epochs; peer_config.setup_code = format!( "(contract-call? 
.pox set-burnchain-parameters u{} u{} u{} u{})", burnchain.first_block_height, @@ -1693,14 +1693,14 @@ pub mod test { .map(|addr| (addr.into(), (1024 * POX_THRESHOLD_STEPS_USTX) as u64)) .collect(); - peer_config.initial_balances = balances; + peer_config.chain_config.initial_balances = balances; let peer = TestPeer::new_with_observer(peer_config, observer); (peer, keys.to_vec()) } pub fn eval_at_tip(peer: &mut TestPeer, boot_contract: &str, expr: &str) -> Value { - let sortdb = peer.sortdb.take().unwrap(); + let sortdb = peer.chain.sortdb.take().unwrap(); let (consensus_hash, block_bhh) = SortitionDB::get_canonical_stacks_chain_tip_hash(sortdb.conn()).unwrap(); let stacks_block_id = StacksBlockId::new(&consensus_hash, &block_bhh); @@ -1711,7 +1711,7 @@ pub mod test { &boot_code_id(boot_contract, false), expr, ); - peer.sortdb = Some(sortdb); + peer.chain.sortdb = Some(sortdb); value } @@ -1728,7 +1728,7 @@ pub mod test { name: &str, expr: &str, ) -> Value { - let sortdb = peer.sortdb.take().unwrap(); + let sortdb = peer.chain.sortdb.take().unwrap(); let (consensus_hash, block_bhh) = SortitionDB::get_canonical_stacks_chain_tip_hash(sortdb.conn()).unwrap(); let stacks_block_id = StacksBlockId::new(&consensus_hash, &block_bhh); @@ -1739,7 +1739,7 @@ pub mod test { &contract_id(addr, name), expr, ); - peer.sortdb = Some(sortdb); + peer.chain.sortdb = Some(sortdb); value } @@ -1815,11 +1815,7 @@ pub mod test { addr: &PrincipalData, ) -> Option<(u128, PoxAddress, u128, u128)> { let value_opt = eval_at_tip(peer, "pox", &format!("(get-stacker-info '{addr})")); - let data = if let Some(d) = value_opt.expect_optional().unwrap() { - d - } else { - return None; - }; + let data = value_opt.expect_optional().unwrap()?; let data = data.expect_tuple().unwrap(); @@ -1855,9 +1851,9 @@ pub mod test { where F: FnOnce(&mut StacksChainState, &SortitionDB) -> R, { - let sortdb = peer.sortdb.take().unwrap(); + let sortdb = peer.chain.sortdb.take().unwrap(); let r = todo(peer.chainstate(), &sortdb); - peer.sortdb = Some(sortdb); + peer.chain.sortdb = Some(sortdb); r } @@ -2814,8 +2810,9 @@ pub mod test { } pub fn get_current_reward_cycle(peer: &TestPeer, burnchain: &Burnchain) -> u128 { - let tip = SortitionDB::get_canonical_burn_chain_tip(peer.sortdb.as_ref().unwrap().conn()) - .unwrap(); + let tip = + SortitionDB::get_canonical_burn_chain_tip(peer.chain.sortdb.as_ref().unwrap().conn()) + .unwrap(); burnchain .block_height_to_reward_cycle(tip.block_height) .unwrap() as u128 @@ -2841,9 +2838,10 @@ pub mod test { let microblock_privkey = StacksPrivateKey::random(); let microblock_pubkeyhash = Hash160::from_node_public_key(&StacksPublicKey::from_private(µblock_privkey)); - let tip = - SortitionDB::get_canonical_burn_chain_tip(peer.sortdb.as_ref().unwrap().conn()) - .unwrap(); + let tip = SortitionDB::get_canonical_burn_chain_tip( + peer.chain.sortdb.as_ref().unwrap().conn(), + ) + .unwrap(); let (burn_ops, stacks_block, microblocks) = peer.make_tenure( |ref mut miner, @@ -2908,7 +2906,7 @@ pub mod test { let mut peer_config = TestPeerConfig::new(function_name!(), 2000, 2001); let alice = StacksAddress::from_string("STVK1K405H6SK9NKJAP32GHYHDJ98MMNP8Y6Z9N0").unwrap(); let bob = StacksAddress::from_string("ST76D2FMXZ7D2719PNE4N71KPSX84XCCNCMYC940").unwrap(); - peer_config.initial_lockups = vec![ + peer_config.chain_config.initial_lockups = vec![ ChainstateAccountLockup::new(alice.clone(), 1000, 1), ChainstateAccountLockup::new(bob.clone(), 1000, 1), ChainstateAccountLockup::new(alice.clone(), 1000, 2), @@ -2968,9 
+2966,10 @@ pub mod test { let microblock_privkey = StacksPrivateKey::random(); let microblock_pubkeyhash = Hash160::from_node_public_key(&StacksPublicKey::from_private(µblock_privkey)); - let tip = - SortitionDB::get_canonical_burn_chain_tip(peer.sortdb.as_ref().unwrap().conn()) - .unwrap(); + let tip = SortitionDB::get_canonical_burn_chain_tip( + peer.chain.sortdb.as_ref().unwrap().conn(), + ) + .unwrap(); let (burn_ops, stacks_block, microblocks) = peer.make_tenure( |ref mut miner, @@ -3035,9 +3034,10 @@ pub mod test { let microblock_privkey = StacksPrivateKey::random(); let microblock_pubkeyhash = Hash160::from_node_public_key(&StacksPublicKey::from_private(µblock_privkey)); - let tip = - SortitionDB::get_canonical_burn_chain_tip(peer.sortdb.as_ref().unwrap().conn()) - .unwrap(); + let tip = SortitionDB::get_canonical_burn_chain_tip( + peer.chain.sortdb.as_ref().unwrap().conn(), + ) + .unwrap(); let (burn_ops, stacks_block, microblocks) = peer.make_tenure(|ref mut miner, ref mut sortdb, ref mut chainstate, vrf_proof, ref parent_opt, ref parent_microblock_header_opt| { let parent_tip = get_parent_tip(parent_opt, chainstate, sortdb); @@ -3152,9 +3152,10 @@ pub mod test { let microblock_privkey = StacksPrivateKey::random(); let microblock_pubkeyhash = Hash160::from_node_public_key(&StacksPublicKey::from_private(µblock_privkey)); - let tip = - SortitionDB::get_canonical_burn_chain_tip(peer.sortdb.as_ref().unwrap().conn()) - .unwrap(); + let tip = SortitionDB::get_canonical_burn_chain_tip( + peer.chain.sortdb.as_ref().unwrap().conn(), + ) + .unwrap(); let (burn_ops, stacks_block, microblocks) = peer.make_tenure( |ref mut miner, @@ -3263,9 +3264,10 @@ pub mod test { let microblock_privkey = StacksPrivateKey::random(); let microblock_pubkeyhash = Hash160::from_node_public_key(&StacksPublicKey::from_private(µblock_privkey)); - let tip = - SortitionDB::get_canonical_burn_chain_tip(peer.sortdb.as_ref().unwrap().conn()) - .unwrap(); + let tip = SortitionDB::get_canonical_burn_chain_tip( + peer.chain.sortdb.as_ref().unwrap().conn(), + ) + .unwrap(); let (burn_ops, stacks_block, microblocks) = peer.make_tenure( |ref mut miner, @@ -3474,9 +3476,10 @@ pub mod test { let microblock_privkey = StacksPrivateKey::random(); let microblock_pubkeyhash = Hash160::from_node_public_key(&StacksPublicKey::from_private(µblock_privkey)); - let tip = - SortitionDB::get_canonical_burn_chain_tip(peer.sortdb.as_ref().unwrap().conn()) - .unwrap(); + let tip = SortitionDB::get_canonical_burn_chain_tip( + peer.chain.sortdb.as_ref().unwrap().conn(), + ) + .unwrap(); let cur_reward_cycle = burnchain .block_height_to_reward_cycle(tip.block_height) @@ -3735,9 +3738,10 @@ pub mod test { let microblock_privkey = StacksPrivateKey::random(); let microblock_pubkeyhash = Hash160::from_node_public_key(&StacksPublicKey::from_private(µblock_privkey)); - let tip = - SortitionDB::get_canonical_burn_chain_tip(peer.sortdb.as_ref().unwrap().conn()) - .unwrap(); + let tip = SortitionDB::get_canonical_burn_chain_tip( + peer.chain.sortdb.as_ref().unwrap().conn(), + ) + .unwrap(); let (burn_ops, stacks_block, microblocks) = peer.make_tenure( |ref mut miner, @@ -4002,9 +4006,10 @@ pub mod test { let microblock_privkey = StacksPrivateKey::random(); let microblock_pubkeyhash = Hash160::from_node_public_key(&StacksPublicKey::from_private(µblock_privkey)); - let tip = - SortitionDB::get_canonical_burn_chain_tip(peer.sortdb.as_ref().unwrap().conn()) - .unwrap(); + let tip = SortitionDB::get_canonical_burn_chain_tip( + 
peer.chain.sortdb.as_ref().unwrap().conn(), + ) + .unwrap(); let (burn_ops, stacks_block, microblocks) = peer.make_tenure( |ref mut miner, @@ -4218,9 +4223,10 @@ pub mod test { let microblock_privkey = StacksPrivateKey::random(); let microblock_pubkeyhash = Hash160::from_node_public_key(&StacksPublicKey::from_private(µblock_privkey)); - let tip = - SortitionDB::get_canonical_burn_chain_tip(peer.sortdb.as_ref().unwrap().conn()) - .unwrap(); + let tip = SortitionDB::get_canonical_burn_chain_tip( + peer.chain.sortdb.as_ref().unwrap().conn(), + ) + .unwrap(); let (burn_ops, stacks_block, microblocks) = peer.make_tenure(|ref mut miner, ref mut sortdb, ref mut chainstate, vrf_proof, ref parent_opt, ref parent_microblock_header_opt| { let parent_tip = get_parent_tip(parent_opt, chainstate, sortdb); @@ -4431,9 +4437,10 @@ pub mod test { let microblock_privkey = StacksPrivateKey::random(); let microblock_pubkeyhash = Hash160::from_node_public_key(&StacksPublicKey::from_private(µblock_privkey)); - let tip = - SortitionDB::get_canonical_burn_chain_tip(peer.sortdb.as_ref().unwrap().conn()) - .unwrap(); + let tip = SortitionDB::get_canonical_burn_chain_tip( + peer.chain.sortdb.as_ref().unwrap().conn(), + ) + .unwrap(); let (burn_ops, stacks_block, microblocks) = peer.make_tenure( |ref mut miner, @@ -4680,9 +4687,10 @@ pub mod test { let microblock_privkey = StacksPrivateKey::random(); let microblock_pubkeyhash = Hash160::from_node_public_key(&StacksPublicKey::from_private(µblock_privkey)); - let tip = - SortitionDB::get_canonical_burn_chain_tip(peer.sortdb.as_ref().unwrap().conn()) - .unwrap(); + let tip = SortitionDB::get_canonical_burn_chain_tip( + peer.chain.sortdb.as_ref().unwrap().conn(), + ) + .unwrap(); let (burn_ops, stacks_block, microblocks) = peer.make_tenure( |ref mut miner, @@ -5202,9 +5210,10 @@ pub mod test { let microblock_privkey = StacksPrivateKey::random(); let microblock_pubkeyhash = Hash160::from_node_public_key(&StacksPublicKey::from_private(µblock_privkey)); - let tip = - SortitionDB::get_canonical_burn_chain_tip(peer.sortdb.as_ref().unwrap().conn()) - .unwrap(); + let tip = SortitionDB::get_canonical_burn_chain_tip( + peer.chain.sortdb.as_ref().unwrap().conn(), + ) + .unwrap(); let (burn_ops, stacks_block, microblocks) = peer.make_tenure( |ref mut miner, @@ -5648,9 +5657,10 @@ pub mod test { let microblock_privkey = StacksPrivateKey::random(); let microblock_pubkeyhash = Hash160::from_node_public_key(&StacksPublicKey::from_private(µblock_privkey)); - let tip = - SortitionDB::get_canonical_burn_chain_tip(peer.sortdb.as_ref().unwrap().conn()) - .unwrap(); + let tip = SortitionDB::get_canonical_burn_chain_tip( + peer.chain.sortdb.as_ref().unwrap().conn(), + ) + .unwrap(); let (burn_ops, stacks_block, microblocks) = peer.make_tenure(|ref mut miner, ref mut sortdb, ref mut chainstate, vrf_proof, ref parent_opt, ref parent_microblock_header_opt| { let parent_tip = get_parent_tip(parent_opt, chainstate, sortdb); diff --git a/stackslib/src/chainstate/stacks/boot/pox_2_tests.rs b/stackslib/src/chainstate/stacks/boot/pox_2_tests.rs index 41ddeeec988..2749bbbf1f2 100644 --- a/stackslib/src/chainstate/stacks/boot/pox_2_tests.rs +++ b/stackslib/src/chainstate/stacks/boot/pox_2_tests.rs @@ -60,7 +60,7 @@ pub fn get_reward_set_entries_at( tip: &StacksBlockId, at_burn_ht: u64, ) -> Vec { - let burnchain = peer.config.burnchain.clone(); + let burnchain = peer.config.chain_config.burnchain.clone(); with_sortdb(peer, |ref mut c, sortdb| { get_reward_set_entries_at_block(c, &burnchain, sortdb, tip, 
at_burn_ht).unwrap() }) @@ -73,7 +73,7 @@ pub fn get_reward_set_entries_index_order_at( tip: &StacksBlockId, at_burn_ht: u64, ) -> Vec { - let burnchain = peer.config.burnchain.clone(); + let burnchain = peer.config.chain_config.burnchain.clone(); with_sortdb(peer, |ref mut c, sortdb| { c.get_reward_addresses(&burnchain, sortdb, at_burn_ht, tip) .unwrap() @@ -149,9 +149,15 @@ pub fn check_all_stacker_link_invariants( // For cycles where PoX-3 is active, check if Epoch24 has activated first. let active_pox_contract = peer .config + .chain_config .burnchain .pox_constants - .active_pox_contract(peer.config.burnchain.reward_cycle_to_block_height(cycle)); + .active_pox_contract( + peer.config + .chain_config + .burnchain + .reward_cycle_to_block_height(cycle), + ); if active_pox_contract == POX_3_NAME && epoch < StacksEpochId::Epoch24 { info!( "Skipping check on a PoX-3 reward cycle because Epoch24 has not started yet"; @@ -337,6 +343,7 @@ pub fn check_stacking_state_invariants( let stacking_state_unlock_ht = peer .config + .chain_config .burnchain .reward_cycle_to_block_height((first_cycle + lock_period) as u64); @@ -430,11 +437,13 @@ pub fn check_stacker_link_invariants(peer: &mut TestPeer, tip: &StacksBlockId, c .burn_header_height; let tip_cycle = peer .config + .chain_config .burnchain .block_height_to_reward_cycle(current_burn_height.into()) .unwrap(); let cycle_start = peer .config + .chain_config .burnchain .reward_cycle_to_block_height(cycle_number); @@ -446,11 +455,17 @@ pub fn check_stacker_link_invariants(peer: &mut TestPeer, tip: &StacksBlockId, c .unwrap() .unwrap(); - let active_pox_contract = peer.config.burnchain.pox_constants.active_pox_contract( - peer.config - .burnchain - .reward_cycle_to_block_height(cycle_number), - ); + let active_pox_contract = peer + .config + .chain_config + .burnchain + .pox_constants + .active_pox_contract( + peer.config + .chain_config + .burnchain + .reward_cycle_to_block_height(cycle_number), + ); if cycle_start_epoch.epoch_id == StacksEpochId::Epoch22 || cycle_start_epoch.epoch_id == StacksEpochId::Epoch23 @@ -467,8 +482,8 @@ pub fn check_stacker_link_invariants(peer: &mut TestPeer, tip: &StacksBlockId, c "Skipping validation of reward set that started in Epoch24, but its cycle starts before pox-3 activation"; "cycle" => cycle_number, "cycle_start" => cycle_start, - "pox_3_activation" => peer.config.burnchain.pox_constants.pox_3_activation_height, - "pox_4_activation" => peer.config.burnchain.pox_constants.pox_4_activation_height, + "pox_3_activation" => peer.config.chain_config.burnchain.pox_constants.pox_3_activation_height, + "pox_4_activation" => peer.config.chain_config.burnchain.pox_constants.pox_4_activation_height, "epoch_2_4_start" => cycle_start_epoch.start_height, ); return; @@ -510,7 +525,12 @@ pub fn check_stacker_link_invariants(peer: &mut TestPeer, tip: &StacksBlockId, c if tip_epoch.epoch_id >= StacksEpochId::Epoch24 && current_burn_height - <= peer.config.burnchain.pox_constants.pox_3_activation_height + <= peer + .config + .chain_config + .burnchain + .pox_constants + .pox_3_activation_height { // if the tip is epoch-2.4, and pox-3 isn't the active pox contract yet, // the invariant checks will not make sense for the same reasons as above @@ -519,7 +539,12 @@ pub fn check_stacker_link_invariants(peer: &mut TestPeer, tip: &StacksBlockId, c if tip_epoch.epoch_id >= StacksEpochId::Epoch25 && current_burn_height - <= peer.config.burnchain.pox_constants.pox_4_activation_height + <= peer + .config + .chain_config + .burnchain + 
.pox_constants + .pox_4_activation_height { // if the tip is epoch-2.5, and pox-5 isn't the active pox contract yet, // the invariant checks will not make sense for the same reasons as above @@ -550,11 +575,17 @@ pub fn check_stacker_link_invariants(peer: &mut TestPeer, tip: &StacksBlockId, c /// Get the `cycle_number`'s total stacked amount at the given chaintip pub fn get_reward_cycle_total(peer: &mut TestPeer, tip: &StacksBlockId, cycle_number: u64) -> u128 { - let active_pox_contract = peer.config.burnchain.pox_constants.active_pox_contract( - peer.config - .burnchain - .reward_cycle_to_block_height(cycle_number), - ); + let active_pox_contract = peer + .config + .chain_config + .burnchain + .pox_constants + .active_pox_contract( + peer.config + .chain_config + .burnchain + .reward_cycle_to_block_height(cycle_number), + ); with_clarity_db_ro(peer, tip, |db| { let total_stacked_key = TupleData::from_data(vec![( @@ -776,7 +807,7 @@ fn test_simple_pox_lockup_transition_pox_2() { }; // our "tenure counter" is now at 0 - let tip = get_tip(peer.sortdb.as_ref()); + let tip = get_tip(peer.chain.sortdb.as_ref()); assert_eq!(tip.block_height, 0 + EMPTY_SORTITIONS as u64); // first tenure is empty @@ -794,7 +825,7 @@ fn test_simple_pox_lockup_transition_pox_2() { assert_eq!(alice_account.stx_balance.unlock_height(), 0); // next tenure include Alice's lockup - let tip = get_tip(peer.sortdb.as_ref()); + let tip = get_tip(peer.chain.sortdb.as_ref()); let alice_lockup = make_pox_lockup( &alice, 0, @@ -854,7 +885,7 @@ fn test_simple_pox_lockup_transition_pox_2() { // should be accepted (checked via the tx receipt). Also, importantly, // the cost tracker should assign costs to Charlie's transaction. // This is also checked by the transaction receipt. - let tip = get_tip(peer.sortdb.as_ref()); + let tip = get_tip(peer.chain.sortdb.as_ref()); // our "tenure counter" is now at 9 assert_eq!(tip.block_height, 9 + EMPTY_SORTITIONS as u64); @@ -880,7 +911,7 @@ fn test_simple_pox_lockup_transition_pox_2() { // Lets have Bob lock up for v2 // this will lock for cycles 8, 9, 10, and 11 // the first v2 cycle will be 8 - let tip = get_tip(peer.sortdb.as_ref()); + let tip = get_tip(peer.chain.sortdb.as_ref()); let bob_lockup = make_pox_2_lockup( &bob, @@ -904,7 +935,7 @@ fn test_simple_pox_lockup_transition_pox_2() { assert_eq!(alice_balance, 0); // Now, Bob tries to lock in PoX v1 too, but it shouldn't work! - let tip = get_tip(peer.sortdb.as_ref()); + let tip = get_tip(peer.chain.sortdb.as_ref()); let bob_lockup = make_pox_lockup( &bob, @@ -921,7 +952,7 @@ fn test_simple_pox_lockup_transition_pox_2() { let block_id = peer.tenure_with_txs(&[bob_lockup], &mut coinbase_nonce); // our "tenure counter" is now at 12 - let tip = get_tip(peer.sortdb.as_ref()); + let tip = get_tip(peer.chain.sortdb.as_ref()); assert_eq!(tip.block_height, 12 + EMPTY_SORTITIONS as u64); // One more empty tenure to reach the unlock height let block_id = peer.tenure_with_txs(&[], &mut coinbase_nonce); @@ -932,7 +963,7 @@ fn test_simple_pox_lockup_transition_pox_2() { // At this point, the auto unlock height for v1 accounts should be reached. 
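// A minimal sketch, not part of this patch: the invariant checks above repeatedly walk
// `peer.config.chain_config.burnchain` to decide which PoX contract governs a cycle.
// The helper name below is hypothetical; the `reward_cycle_to_block_height` and
// `active_pox_contract` calls are the ones the hunks above now reach through `chain_config`.
fn active_pox_contract_for_cycle(peer: &TestPeer, cycle_number: u64) -> String {
    let burnchain = &peer.config.chain_config.burnchain;
    // Translate the reward cycle into its start height, then ask PoxConstants which
    // contract (pox, pox-2, pox-3, ...) is active at that height.
    let cycle_start = burnchain.reward_cycle_to_block_height(cycle_number);
    burnchain
        .pox_constants
        .active_pox_contract(cycle_start)
        .to_string()
}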
// let Alice stack in PoX v2 - let tip = get_tip(peer.sortdb.as_ref()); + let tip = get_tip(peer.chain.sortdb.as_ref()); // our "tenure counter" is now at 13 assert_eq!(tip.block_height, 13 + EMPTY_SORTITIONS as u64); @@ -963,7 +994,7 @@ fn test_simple_pox_lockup_transition_pox_2() { assert_eq!(alice_balance, 512 * POX_THRESHOLD_STEPS_USTX); } - let tip = get_tip(peer.sortdb.as_ref()); + let tip = get_tip(peer.chain.sortdb.as_ref()); // our "tenure counter" is now at 31 assert_eq!(tip.block_height, 31 + EMPTY_SORTITIONS as u64); @@ -1174,7 +1205,7 @@ fn test_simple_pox_2_auto_unlock(alice_first: bool) { // Lets have Bob lock up for v2 // this will lock for cycles 8, 9, 10, and 11 // the first v2 cycle will be 8 - let tip = get_tip(peer.sortdb.as_ref()); + let tip = get_tip(peer.chain.sortdb.as_ref()); let alice_lockup = make_pox_2_lockup( &alice, @@ -1245,7 +1276,7 @@ fn test_simple_pox_2_auto_unlock(alice_first: bool) { .unwrap(); assert_eq!(bob_bal.amount_locked(), POX_THRESHOLD_STEPS_USTX); - while get_tip(peer.sortdb.as_ref()).block_height < height_target { + while get_tip(peer.chain.sortdb.as_ref()).block_height < height_target { latest_block = peer.tenure_with_txs(&[], &mut coinbase_nonce); } @@ -1470,7 +1501,7 @@ fn delegate_stack_increase() { } // in the next tenure, PoX 2 should now exist. - let tip = get_tip(peer.sortdb.as_ref()); + let tip = get_tip(peer.chain.sortdb.as_ref()); // submit delegation tx let success_alice_delegation = alice_nonce; @@ -1528,7 +1559,7 @@ fn delegate_stack_increase() { // this is one block after the reward cycle starts let height_target = burnchain.reward_cycle_to_block_height(EXPECTED_FIRST_V2_CYCLE + 3) + 1; - while get_tip(peer.sortdb.as_ref()).block_height < height_target { + while get_tip(peer.chain.sortdb.as_ref()).block_height < height_target { latest_block = peer.tenure_with_txs(&[], &mut coinbase_nonce); } @@ -1822,7 +1853,7 @@ fn stack_increase() { } // in the next tenure, PoX 2 should now exist. 
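// A minimal sketch, not part of this patch: the recurring mechanical change in these
// hunks is that the sortition DB handle now hangs off `peer.chain` rather than off
// `TestPeer` directly. Reading the canonical burn tip therefore looks like this
// (helper name hypothetical; the calls are the same ones used above):
fn canonical_burn_tip_height(peer: &TestPeer) -> u64 {
    // `sortdb` is an Option; the tests unwrap it because the peer is already booted here.
    let sortdb = peer.chain.sortdb.as_ref().unwrap();
    SortitionDB::get_canonical_burn_chain_tip(sortdb.conn())
        .unwrap()
        .block_height
}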
- let tip = get_tip(peer.sortdb.as_ref()); + let tip = get_tip(peer.chain.sortdb.as_ref()); // submit an increase: this should fail, because Alice is not yet locked let fail_no_lock_tx = alice_nonce; @@ -1876,7 +1907,7 @@ fn stack_increase() { // this is one block after the reward cycle starts let height_target = burnchain.reward_cycle_to_block_height(EXPECTED_FIRST_V2_CYCLE + 3) + 1; - while get_tip(peer.sortdb.as_ref()).block_height < height_target { + while get_tip(peer.chain.sortdb.as_ref()).block_height < height_target { latest_block = peer.tenure_with_txs(&[], &mut coinbase_nonce); } @@ -2031,7 +2062,7 @@ fn test_lock_period_invariant_extend_transition() { .unwrap() + 1; - eprintln!("First v2 cycle = {}", first_v2_cycle); + eprintln!("First v2 cycle = {first_v2_cycle}"); assert_eq!(first_v2_cycle, EXPECTED_FIRST_V2_CYCLE); let epochs = StacksEpoch::all(0, 0, EMPTY_SORTITIONS as u64 + 10); @@ -2059,7 +2090,7 @@ fn test_lock_period_invariant_extend_transition() { let ALICE_LOCKUP = 1024 * POX_THRESHOLD_STEPS_USTX; // our "tenure counter" is now at 0 - let tip = get_tip(peer.sortdb.as_ref()); + let tip = get_tip(peer.chain.sortdb.as_ref()); assert_eq!(tip.block_height, 0 + EMPTY_SORTITIONS as u64); // first tenure is empty @@ -2074,7 +2105,7 @@ fn test_lock_period_invariant_extend_transition() { assert_eq!(alice_account.stx_balance.unlock_height(), 0); // next tenure include Alice's lockup - let tip = get_tip(peer.sortdb.as_ref()); + let tip = get_tip(peer.chain.sortdb.as_ref()); let alice_lockup = make_pox_lockup( &alice, 0, @@ -2138,7 +2169,7 @@ fn test_lock_period_invariant_extend_transition() { // Lets have Bob lock up for v2 // this will lock for cycles 8, 9, 10 // the first v2 cycle will be 8 - let tip = get_tip(peer.sortdb.as_ref()); + let tip = get_tip(peer.chain.sortdb.as_ref()); // Alice _will_ auto-unlock: she can stack-extend in PoX v2 let alice_lockup = make_pox_2_extend( @@ -2311,7 +2342,7 @@ fn test_pox_extend_transition_pox_2() { }; // our "tenure counter" is now at 0 - let tip = get_tip(peer.sortdb.as_ref()); + let tip = get_tip(peer.chain.sortdb.as_ref()); assert_eq!(tip.block_height, 0 + EMPTY_SORTITIONS as u64); // first tenure is empty @@ -2326,7 +2357,7 @@ fn test_pox_extend_transition_pox_2() { assert_eq!(alice_account.stx_balance.unlock_height(), 0); // next tenure include Alice's lockup - let tip = get_tip(peer.sortdb.as_ref()); + let tip = get_tip(peer.chain.sortdb.as_ref()); let alice_lockup = make_pox_lockup( &alice, 0, @@ -2392,7 +2423,7 @@ fn test_pox_extend_transition_pox_2() { // Lets have Bob lock up for v2 // this will lock for cycles 8, 9, 10 // the first v2 cycle will be 8 - let tip = get_tip(peer.sortdb.as_ref()); + let tip = get_tip(peer.chain.sortdb.as_ref()); let bob_lockup = make_pox_2_lockup( &bob, @@ -2451,7 +2482,7 @@ fn test_pox_extend_transition_pox_2() { alice_rewards_to_v2_start_checks(tip_index_block, &mut peer); } - let tip = get_tip(peer.sortdb.as_ref()); + let tip = get_tip(peer.chain.sortdb.as_ref()); // our "tenure counter" is now at 15 assert_eq!(tip.block_height, 15 + EMPTY_SORTITIONS as u64); @@ -2468,7 +2499,7 @@ fn test_pox_extend_transition_pox_2() { } // our "tenure counter" is now at 32 - let tip = get_tip(peer.sortdb.as_ref()); + let tip = get_tip(peer.chain.sortdb.as_ref()); assert_eq!(tip.block_height, 32 + EMPTY_SORTITIONS as u64); // Alice would have unlocked under v1 rules, so try to stack again via PoX 1 and expect a runtime error @@ -2736,7 +2767,7 @@ fn test_delegate_extend_transition_pox_2() { }; // our "tenure 
counter" is now at 0 - let tip = get_tip(peer.sortdb.as_ref()); + let tip = get_tip(peer.chain.sortdb.as_ref()); assert_eq!(tip.block_height, 0 + EMPTY_SORTITIONS as u64); // first tenure is empty @@ -2751,7 +2782,7 @@ fn test_delegate_extend_transition_pox_2() { assert_eq!(alice_account.stx_balance.unlock_height(), 0); // next tenure include Alice's lockup - let tip = get_tip(peer.sortdb.as_ref()); + let tip = get_tip(peer.chain.sortdb.as_ref()); let delegate_tx = make_pox_contract_call( &alice, 0, @@ -2883,7 +2914,7 @@ fn test_delegate_extend_transition_pox_2() { // Lets have Bob lock up for v2 // this will lock for cycles 8, 9, 10 // the first v2 cycle will be 8 - let tip = get_tip(peer.sortdb.as_ref()); + let tip = get_tip(peer.chain.sortdb.as_ref()); let bob_delegate_tx = make_pox_2_contract_call( &bob, @@ -3090,7 +3121,7 @@ fn test_delegate_extend_transition_pox_2() { alice_rewards_to_v2_start_checks(tip_index_block, &mut peer); } - let tip = get_tip(peer.sortdb.as_ref()); + let tip = get_tip(peer.chain.sortdb.as_ref()); // our "tenure counter" is now at 15 assert_eq!(tip.block_height, 15 + EMPTY_SORTITIONS as u64); @@ -3156,7 +3187,7 @@ fn test_delegate_extend_transition_pox_2() { } // our "tenure counter" is now at 32 - let tip = get_tip(peer.sortdb.as_ref()); + let tip = get_tip(peer.chain.sortdb.as_ref()); assert_eq!(tip.block_height, 32 + EMPTY_SORTITIONS as u64); // Alice would have unlocked under v1 rules, so try to stack again via PoX 1 and expect a runtime error @@ -3185,7 +3216,7 @@ fn test_delegate_extend_transition_pox_2() { for r in b.receipts.into_iter() { if let TransactionOrigin::Stacks(ref t) = r.transaction { let addr = t.auth.origin().address_testnet(); - eprintln!("TX addr: {}", addr); + eprintln!("TX addr: {addr}"); if addr == alice_address { alice_txs.insert(t.auth.get_origin_nonce(), r); } else if addr == bob_address { @@ -3375,7 +3406,7 @@ fn test_pox_2_getters() { peer.tenure_with_txs(&[], &mut coinbase_nonce); } - let tip = get_tip(peer.sortdb.as_ref()); + let tip = get_tip(peer.chain.sortdb.as_ref()); let cur_reward_cycle = burnchain .block_height_to_reward_cycle(tip.block_height) .unwrap(); @@ -3645,8 +3676,9 @@ fn test_get_pox_addrs() { let microblock_privkey = StacksPrivateKey::random(); let microblock_pubkeyhash = Hash160::from_node_public_key(&StacksPublicKey::from_private(µblock_privkey)); - let tip = SortitionDB::get_canonical_burn_chain_tip(peer.sortdb.as_ref().unwrap().conn()) - .unwrap(); + let tip = + SortitionDB::get_canonical_burn_chain_tip(peer.chain.sortdb.as_ref().unwrap().conn()) + .unwrap(); let cur_reward_cycle = burnchain .block_height_to_reward_cycle(tip.block_height) @@ -3923,8 +3955,9 @@ fn test_stack_with_segwit() { let microblock_privkey = StacksPrivateKey::random(); let microblock_pubkeyhash = Hash160::from_node_public_key(&StacksPublicKey::from_private(µblock_privkey)); - let tip = SortitionDB::get_canonical_burn_chain_tip(peer.sortdb.as_ref().unwrap().conn()) - .unwrap(); + let tip = + SortitionDB::get_canonical_burn_chain_tip(peer.chain.sortdb.as_ref().unwrap().conn()) + .unwrap(); let cur_reward_cycle = burnchain .block_height_to_reward_cycle(tip.block_height) @@ -4257,7 +4290,7 @@ fn test_pox_2_delegate_stx_addr_validation() { peer.tenure_with_txs(&[], &mut coinbase_nonce); } - let tip = get_tip(peer.sortdb.as_ref()); + let tip = get_tip(peer.chain.sortdb.as_ref()); let cur_reward_cycle = burnchain .block_height_to_reward_cycle(tip.block_height) .unwrap(); @@ -4463,7 +4496,7 @@ fn stack_aggregation_increase() { } // in the 
next tenure, PoX 2 should now exist. - let tip = get_tip(peer.sortdb.as_ref()); + let tip = get_tip(peer.chain.sortdb.as_ref()); // submit delegation tx for alice let alice_delegation_1 = make_pox_2_contract_call( @@ -4529,7 +4562,7 @@ fn stack_aggregation_increase() { // this is one block after the reward cycle starts let height_target = burnchain.reward_cycle_to_block_height(EXPECTED_FIRST_V2_CYCLE + 3) + 1; - while get_tip(peer.sortdb.as_ref()).block_height < height_target { + while get_tip(peer.chain.sortdb.as_ref()).block_height < height_target { latest_block = peer.tenure_with_txs(&[], &mut coinbase_nonce); } @@ -4552,7 +4585,7 @@ fn stack_aggregation_increase() { assert_eq!(partial_stacked, 512 * POX_THRESHOLD_STEPS_USTX); } - let tip = get_tip(peer.sortdb.as_ref()); + let tip = get_tip(peer.chain.sortdb.as_ref()); let cur_reward_cycle = burnchain .block_height_to_reward_cycle(tip.block_height) .unwrap(); @@ -4603,7 +4636,7 @@ fn stack_aggregation_increase() { bob_nonce += 1; latest_block = peer.tenure_with_txs(&txs_to_submit, &mut coinbase_nonce); - let tip = get_tip(peer.sortdb.as_ref()); + let tip = get_tip(peer.chain.sortdb.as_ref()); let cur_reward_cycle = burnchain .block_height_to_reward_cycle(tip.block_height) .unwrap(); @@ -4900,7 +4933,7 @@ fn stack_in_both_pox1_and_pox2() { } // in the next tenure, PoX 2 should now exist. - let tip = get_tip(peer.sortdb.as_ref()); + let tip = get_tip(peer.chain.sortdb.as_ref()); // our "tenure counter" is now at 10 assert_eq!(tip.block_height, 10 + EMPTY_SORTITIONS as u64); diff --git a/stackslib/src/chainstate/stacks/boot/pox_3_tests.rs b/stackslib/src/chainstate/stacks/boot/pox_3_tests.rs index 0e4c34fa005..a99d4c98c29 100644 --- a/stackslib/src/chainstate/stacks/boot/pox_3_tests.rs +++ b/stackslib/src/chainstate/stacks/boot/pox_3_tests.rs @@ -184,7 +184,7 @@ fn simple_pox_lockup_transition_pox_2() { let mut coinbase_nonce = 0; // our "tenure counter" is now at 0 - let tip = get_tip(peer.sortdb.as_ref()); + let tip = get_tip(peer.chain.sortdb.as_ref()); assert_eq!(tip.block_height, 0 + EMPTY_SORTITIONS as u64); // first tenure is empty @@ -202,7 +202,7 @@ fn simple_pox_lockup_transition_pox_2() { assert_eq!(alice_account.stx_balance.unlock_height(), 0); // next tenure include Alice's lockup - let tip = get_tip(peer.sortdb.as_ref()); + let tip = get_tip(peer.chain.sortdb.as_ref()); let alice_lockup = make_pox_lockup( &alice, 0, @@ -249,7 +249,9 @@ fn simple_pox_lockup_transition_pox_2() { assert_eq!(alice_balance, 0); // produce blocks until immediately before the 2.1 epoch switch - while get_tip(peer.sortdb.as_ref()).block_height < epochs[StacksEpochId::Epoch21].start_height { + while get_tip(peer.chain.sortdb.as_ref()).block_height + < epochs[StacksEpochId::Epoch21].start_height + { peer.tenure_with_txs(&[], &mut coinbase_nonce); // alice is still locked, balance should be 0 @@ -261,7 +263,7 @@ fn simple_pox_lockup_transition_pox_2() { // should be accepted (checked via the tx receipt). Also, importantly, // the cost tracker should assign costs to Charlie's transaction. // This is also checked by the transaction receipt. 
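// A minimal sketch, not part of this patch: many hunks below advance the chain with
// empty tenures until a burn-height target is reached. Wrapped as a hypothetical
// helper (the nonce is assumed to be the `usize` coinbase counter these tests use,
// and `tenure_with_txs` is assumed to return the new tip's StacksBlockId):
fn advance_to_height(
    peer: &mut TestPeer,
    height_target: u64,
    coinbase_nonce: &mut usize,
) -> StacksBlockId {
    let mut latest_block = peer.tenure_with_txs(&[], coinbase_nonce);
    while get_tip(peer.chain.sortdb.as_ref()).block_height < height_target {
        // an empty tenure still produces a coinbase, moving the burn height forward
        latest_block = peer.tenure_with_txs(&[], coinbase_nonce);
    }
    latest_block
}
// e.g. one block into a target reward cycle, as in the hunks above:
//     let target = burnchain.reward_cycle_to_block_height(EXPECTED_FIRST_V2_CYCLE + 3) + 1;
//     let latest_block = advance_to_height(&mut peer, target, &mut coinbase_nonce);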
- let tip = get_tip(peer.sortdb.as_ref()); + let tip = get_tip(peer.chain.sortdb.as_ref()); let test = make_pox_2_contract_call( &charlie, @@ -284,7 +286,7 @@ fn simple_pox_lockup_transition_pox_2() { // Lets have Bob lock up for v2 // this will lock for cycles 8, 9, 10, and 11 // the first v2 cycle will be 8 - let tip = get_tip(peer.sortdb.as_ref()); + let tip = get_tip(peer.chain.sortdb.as_ref()); let bob_lockup = make_pox_2_lockup( &bob, @@ -301,7 +303,7 @@ fn simple_pox_lockup_transition_pox_2() { let block_id = peer.tenure_with_txs(&[bob_lockup], &mut coinbase_nonce); assert_eq!( - get_tip(peer.sortdb.as_ref()).block_height as u32, + get_tip(peer.chain.sortdb.as_ref()).block_height as u32, pox_constants.v1_unlock_height + 1, "Test should have reached 1 + PoX-v1 unlock height" ); @@ -311,7 +313,7 @@ fn simple_pox_lockup_transition_pox_2() { assert_eq!(alice_balance, 1024 * POX_THRESHOLD_STEPS_USTX); // Now, Bob tries to lock in PoX v1 too, but it shouldn't work! - let tip = get_tip(peer.sortdb.as_ref()); + let tip = get_tip(peer.chain.sortdb.as_ref()); let bob_lockup = make_pox_lockup( &bob, @@ -327,7 +329,7 @@ fn simple_pox_lockup_transition_pox_2() { // At this point, the auto unlock height for v1 accounts has been reached. // let Alice stack in PoX v2 - let tip = get_tip(peer.sortdb.as_ref()); + let tip = get_tip(peer.chain.sortdb.as_ref()); let alice_lockup = make_pox_2_lockup( &alice, @@ -347,7 +349,9 @@ fn simple_pox_lockup_transition_pox_2() { assert_eq!(alice_balance, 512 * POX_THRESHOLD_STEPS_USTX); // now, let's roll the chain forward until just before Epoch-2.2 - while get_tip(peer.sortdb.as_ref()).block_height < epochs[StacksEpochId::Epoch22].start_height { + while get_tip(peer.chain.sortdb.as_ref()).block_height + < epochs[StacksEpochId::Epoch22].start_height + { peer.tenure_with_txs(&[], &mut coinbase_nonce); // at this point, alice's balance should always include this half lockup let alice_balance = get_balance(&mut peer, &key_to_stacks_addr(&alice).into()); @@ -364,7 +368,8 @@ fn simple_pox_lockup_transition_pox_2() { assert_eq!(alice_balance, 1024 * POX_THRESHOLD_STEPS_USTX); // now, roll the chain forward to Epoch-2.4 - while get_tip(peer.sortdb.as_ref()).block_height <= epochs[StacksEpochId::Epoch24].start_height + while get_tip(peer.chain.sortdb.as_ref()).block_height + <= epochs[StacksEpochId::Epoch24].start_height { peer.tenure_with_txs(&[], &mut coinbase_nonce); // at this point, alice's balance should always be unlocked @@ -372,7 +377,7 @@ fn simple_pox_lockup_transition_pox_2() { assert_eq!(alice_balance, 1024 * POX_THRESHOLD_STEPS_USTX); } - let tip = get_tip(peer.sortdb.as_ref()).block_height; + let tip = get_tip(peer.chain.sortdb.as_ref()).block_height; let bob_lockup = make_pox_3_lockup( &bob, 2, @@ -583,7 +588,8 @@ fn pox_auto_unlock(alice_first: bool) { let mut coinbase_nonce = 0; // produce blocks until epoch 2.1 - while get_tip(peer.sortdb.as_ref()).block_height <= epochs[StacksEpochId::Epoch21].start_height + while get_tip(peer.chain.sortdb.as_ref()).block_height + <= epochs[StacksEpochId::Epoch21].start_height { peer.tenure_with_txs(&[], &mut coinbase_nonce); } @@ -592,7 +598,7 @@ fn pox_auto_unlock(alice_first: bool) { // Lets have Bob lock up for v2 // this will lock for cycles 8, 9, 10, and 11 // the first v2 cycle will be 8 - let tip = get_tip(peer.sortdb.as_ref()); + let tip = get_tip(peer.chain.sortdb.as_ref()); let alice_lockup = make_pox_2_lockup( &alice, @@ -653,7 +659,7 @@ fn pox_auto_unlock(alice_first: bool) { ); 
assert_eq!(bob_bal.amount_locked(), POX_THRESHOLD_STEPS_USTX); - while get_tip(peer.sortdb.as_ref()).block_height < height_target { + while get_tip(peer.chain.sortdb.as_ref()).block_height < height_target { latest_block = peer.tenure_with_txs(&[], &mut coinbase_nonce); } @@ -734,7 +740,8 @@ fn pox_auto_unlock(alice_first: bool) { // now, lets check behavior in Epochs 2.2-2.4, with pox-3 auto unlock tests // produce blocks until epoch 2.2 - while get_tip(peer.sortdb.as_ref()).block_height <= epochs[StacksEpochId::Epoch22].start_height + while get_tip(peer.chain.sortdb.as_ref()).block_height + <= epochs[StacksEpochId::Epoch22].start_height { peer.tenure_with_txs(&[], &mut coinbase_nonce); let alice_balance = get_balance(&mut peer, &key_to_stacks_addr(&alice).into()); @@ -747,13 +754,14 @@ fn pox_auto_unlock(alice_first: bool) { assert_eq!(alice_balance, 1024 * POX_THRESHOLD_STEPS_USTX); // produce blocks until epoch 2.4 - while get_tip(peer.sortdb.as_ref()).block_height <= epochs[StacksEpochId::Epoch24].start_height + while get_tip(peer.chain.sortdb.as_ref()).block_height + <= epochs[StacksEpochId::Epoch24].start_height { peer.tenure_with_txs(&[], &mut coinbase_nonce); } // repeat the lockups as before, so we can test the pox-3 auto unlock behavior - let tip = get_tip(peer.sortdb.as_ref()); + let tip = get_tip(peer.chain.sortdb.as_ref()); let alice_lockup = make_pox_3_lockup( &alice, @@ -815,7 +823,7 @@ fn pox_auto_unlock(alice_first: bool) { ); assert_eq!(bob_bal.amount_locked(), POX_THRESHOLD_STEPS_USTX); - while get_tip(peer.sortdb.as_ref()).block_height < height_target { + while get_tip(peer.chain.sortdb.as_ref()).block_height < height_target { latest_block = peer.tenure_with_txs(&[], &mut coinbase_nonce); } @@ -1022,13 +1030,14 @@ fn delegate_stack_increase() { let mut coinbase_nonce = 0; // produce blocks until epoch 2.1 - while get_tip(peer.sortdb.as_ref()).block_height <= epochs[StacksEpochId::Epoch21].start_height + while get_tip(peer.chain.sortdb.as_ref()).block_height + <= epochs[StacksEpochId::Epoch21].start_height { peer.tenure_with_txs(&[], &mut coinbase_nonce); } // in the next tenure, PoX 2 should now exist. 
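// A minimal sketch, not part of this patch: the auto-unlock assertions above compare the
// spendable balance returned by `get_balance` against the expected lockup split. A
// hypothetical wrapper for that repeated check; the `PrincipalData` conversion mirrors
// `key_to_stacks_addr(&alice).into()` as used in these tests.
fn assert_spendable(peer: &mut TestPeer, key: &StacksPrivateKey, expected_ustx: u128) {
    let who: PrincipalData = key_to_stacks_addr(key).into();
    // `get_balance` reads the unlocked uSTX for the principal at the current canonical tip
    let unlocked = get_balance(peer, &who);
    assert_eq!(unlocked, expected_ustx);
}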
- let tip = get_tip(peer.sortdb.as_ref()); + let tip = get_tip(peer.chain.sortdb.as_ref()); // submit delegation tx let alice_delegation_1 = make_pox_2_contract_call( @@ -1089,7 +1098,7 @@ fn delegate_stack_increase() { // this is one block after the reward cycle starts let height_target = burnchain.reward_cycle_to_block_height(EXPECTED_FIRST_V2_CYCLE + 1) + 1; - while get_tip(peer.sortdb.as_ref()).block_height < height_target { + while get_tip(peer.chain.sortdb.as_ref()).block_height < height_target { latest_block = peer.tenure_with_txs(&[], &mut coinbase_nonce); } @@ -1208,7 +1217,9 @@ fn delegate_stack_increase() { // on pox-3 // roll the chain forward until just before Epoch-2.2 - while get_tip(peer.sortdb.as_ref()).block_height < epochs[StacksEpochId::Epoch22].start_height { + while get_tip(peer.chain.sortdb.as_ref()).block_height + < epochs[StacksEpochId::Epoch22].start_height + { latest_block = peer.tenure_with_txs(&[], &mut coinbase_nonce); // at this point, alice's balance should always include this half lockup assert_eq!( @@ -1251,12 +1262,13 @@ fn delegate_stack_increase() { ); // Roll to Epoch-2.4 and re-do the above tests - while get_tip(peer.sortdb.as_ref()).block_height <= epochs[StacksEpochId::Epoch24].start_height + while get_tip(peer.chain.sortdb.as_ref()).block_height + <= epochs[StacksEpochId::Epoch24].start_height { latest_block = peer.tenure_with_txs(&[], &mut coinbase_nonce); } - let tip = get_tip(peer.sortdb.as_ref()); + let tip = get_tip(peer.chain.sortdb.as_ref()); // submit delegation tx let alice_delegation_1 = make_pox_3_contract_call( @@ -1315,7 +1327,7 @@ fn delegate_stack_increase() { // this is one block after the reward cycle starts let height_target = burnchain.reward_cycle_to_block_height(first_v3_cycle + 3) + 1; - while get_tip(peer.sortdb.as_ref()).block_height < height_target { + while get_tip(peer.chain.sortdb.as_ref()).block_height < height_target { latest_block = peer.tenure_with_txs(&[], &mut coinbase_nonce); } @@ -1639,13 +1651,14 @@ fn stack_increase() { let increase_amt = total_balance - first_lockup_amt; // produce blocks until epoch 2.1 - while get_tip(peer.sortdb.as_ref()).block_height <= epochs[StacksEpochId::Epoch21].start_height + while get_tip(peer.chain.sortdb.as_ref()).block_height + <= epochs[StacksEpochId::Epoch21].start_height { peer.tenure_with_txs(&[], &mut coinbase_nonce); } // in the next tenure, PoX 2 should now exist. 
- let tip = get_tip(peer.sortdb.as_ref()); + let tip = get_tip(peer.chain.sortdb.as_ref()); // submit an increase: this should fail, because Alice is not yet locked let fail_no_lock_tx = alice_nonce; @@ -1691,7 +1704,7 @@ fn stack_increase() { // this is one block after the reward cycle starts let height_target = burnchain.reward_cycle_to_block_height(EXPECTED_FIRST_V2_CYCLE + 1) + 1; - while get_tip(peer.sortdb.as_ref()).block_height < height_target { + while get_tip(peer.chain.sortdb.as_ref()).block_height < height_target { latest_block = peer.tenure_with_txs(&[], &mut coinbase_nonce); } @@ -1773,7 +1786,9 @@ fn stack_increase() { // on pox-3 // roll the chain forward until just before Epoch-2.2 - while get_tip(peer.sortdb.as_ref()).block_height < epochs[StacksEpochId::Epoch22].start_height { + while get_tip(peer.chain.sortdb.as_ref()).block_height + < epochs[StacksEpochId::Epoch22].start_height + { latest_block = peer.tenure_with_txs(&[], &mut coinbase_nonce); // at this point, alice's balance should always include this half lockup assert_eq!( @@ -1802,13 +1817,14 @@ fn stack_increase() { ); // Roll to Epoch-2.4 and re-do the above stack-increase tests - while get_tip(peer.sortdb.as_ref()).block_height <= epochs[StacksEpochId::Epoch24].start_height + while get_tip(peer.chain.sortdb.as_ref()).block_height + <= epochs[StacksEpochId::Epoch24].start_height { latest_block = peer.tenure_with_txs(&[], &mut coinbase_nonce); } // in the next tenure, PoX 3 should now exist. - let tip = get_tip(peer.sortdb.as_ref()); + let tip = get_tip(peer.chain.sortdb.as_ref()); // submit an increase: this should fail, because Alice is not yet locked let pox_3_fail_no_lock_tx = alice_nonce; @@ -1858,7 +1874,7 @@ fn stack_increase() { // this is one block after the reward cycle starts let height_target = burnchain.reward_cycle_to_block_height(first_v3_cycle + 3) + 1; - while get_tip(peer.sortdb.as_ref()).block_height < height_target { + while get_tip(peer.chain.sortdb.as_ref()).block_height < height_target { latest_block = peer.tenure_with_txs(&[], &mut coinbase_nonce); } @@ -2165,7 +2181,7 @@ fn pox_extend_transition() { assert_eq!(alice_account.stx_balance.unlock_height(), 0); // next tenure include Alice's lockup - let tip = get_tip(peer.sortdb.as_ref()); + let tip = get_tip(peer.chain.sortdb.as_ref()); let alice_lockup = make_pox_lockup( &alice, 0, @@ -2212,12 +2228,14 @@ fn pox_extend_transition() { let alice_balance = get_balance(&mut peer, &key_to_stacks_addr(&alice).into()); assert_eq!(alice_balance, 0); - while get_tip(peer.sortdb.as_ref()).block_height < height_target { + while get_tip(peer.chain.sortdb.as_ref()).block_height < height_target { latest_block = peer.tenure_with_txs(&[], &mut coinbase_nonce); } // produce blocks until epoch 2.1 - while get_tip(peer.sortdb.as_ref()).block_height < epochs[StacksEpochId::Epoch21].start_height { + while get_tip(peer.chain.sortdb.as_ref()).block_height + < epochs[StacksEpochId::Epoch21].start_height + { peer.tenure_with_txs(&[], &mut coinbase_nonce); alice_rewards_to_v2_start_checks(latest_block.clone(), &mut peer); } @@ -2226,7 +2244,7 @@ fn pox_extend_transition() { // Lets have Bob lock up for v2 // this will lock for cycles 8, 9, 10 // the first v2 cycle will be 8 - let tip = get_tip(peer.sortdb.as_ref()); + let tip = get_tip(peer.chain.sortdb.as_ref()); let bob_lockup = make_pox_2_lockup( &bob, @@ -2271,7 +2289,7 @@ fn pox_extend_transition() { // produce blocks until the v2 reward cycles start let height_target = 
burnchain.reward_cycle_to_block_height(first_v2_cycle) - 1; - while get_tip(peer.sortdb.as_ref()).block_height < height_target { + while get_tip(peer.chain.sortdb.as_ref()).block_height < height_target { latest_block = peer.tenure_with_txs(&[], &mut coinbase_nonce); // alice is still locked, balance should be 0 let alice_balance = get_balance(&mut peer, &key_to_stacks_addr(&alice).into()); @@ -2286,7 +2304,9 @@ fn pox_extend_transition() { // Roll to Epoch-2.4 and re-do the above tests // roll the chain forward until just before Epoch-2.2 - while get_tip(peer.sortdb.as_ref()).block_height < epochs[StacksEpochId::Epoch22].start_height { + while get_tip(peer.chain.sortdb.as_ref()).block_height + < epochs[StacksEpochId::Epoch22].start_height + { latest_block = peer.tenure_with_txs(&[], &mut coinbase_nonce); // at this point, alice's balance should be locked, and so should bob's let alice_balance = get_balance(&mut peer, &key_to_stacks_addr(&alice).into()); @@ -2313,12 +2333,13 @@ fn pox_extend_transition() { assert_eq!(bob_account.amount_unlocked(), INITIAL_BALANCE); // Roll to Epoch-2.4 and re-do the above stack-extend tests - while get_tip(peer.sortdb.as_ref()).block_height <= epochs[StacksEpochId::Epoch24].start_height + while get_tip(peer.chain.sortdb.as_ref()).block_height + <= epochs[StacksEpochId::Epoch24].start_height { latest_block = peer.tenure_with_txs(&[], &mut coinbase_nonce); } - let tip = get_tip(peer.sortdb.as_ref()); + let tip = get_tip(peer.chain.sortdb.as_ref()); let alice_lockup = make_pox_3_lockup( &alice, 2, @@ -2362,11 +2383,11 @@ fn pox_extend_transition() { assert_eq!(alice_balance, 0); // advance to the first v3 reward cycle - while get_tip(peer.sortdb.as_ref()).block_height < height_target { + while get_tip(peer.chain.sortdb.as_ref()).block_height < height_target { latest_block = peer.tenure_with_txs(&[], &mut coinbase_nonce); } - let tip = get_tip(peer.sortdb.as_ref()); + let tip = get_tip(peer.chain.sortdb.as_ref()); let bob_lockup = make_pox_3_lockup( &bob, 2, @@ -2448,7 +2469,7 @@ fn pox_extend_transition() { for r in b.receipts.into_iter() { if let TransactionOrigin::Stacks(ref t) = r.transaction { let addr = t.auth.origin().address_testnet(); - eprintln!("TX addr: {}", addr); + eprintln!("TX addr: {addr}"); if addr == alice_address { alice_txs.insert(t.auth.get_origin_nonce(), r); } else if addr == bob_address { @@ -2580,21 +2601,22 @@ fn delegate_extend_pox_3() { let LOCKUP_AMT = 1024 * POX_THRESHOLD_STEPS_USTX; // our "tenure counter" is now at 0 - let tip = get_tip(peer.sortdb.as_ref()); + let tip = get_tip(peer.chain.sortdb.as_ref()); assert_eq!(tip.block_height, 0 + EMPTY_SORTITIONS as u64); // first tenure is empty let mut latest_block = peer.tenure_with_txs(&[], &mut coinbase_nonce); // Roll to Epoch-2.4 and perform the delegate-stack-extend tests - while get_tip(peer.sortdb.as_ref()).block_height <= epochs[StacksEpochId::Epoch24].start_height + while get_tip(peer.chain.sortdb.as_ref()).block_height + <= epochs[StacksEpochId::Epoch24].start_height { latest_block = peer.tenure_with_txs(&[], &mut coinbase_nonce); } // in the next tenure, PoX 3 should now exist. 
// charlie will lock bob and alice through the delegation interface - let tip = get_tip(peer.sortdb.as_ref()); + let tip = get_tip(peer.chain.sortdb.as_ref()); let mut alice_nonce = 0; let mut bob_nonce = 0; @@ -2818,13 +2840,13 @@ fn delegate_extend_pox_3() { } let height_target = burnchain.reward_cycle_to_block_height(first_v3_cycle) + 1; - while get_tip(peer.sortdb.as_ref()).block_height < height_target { + while get_tip(peer.chain.sortdb.as_ref()).block_height < height_target { latest_block = peer.tenure_with_txs(&[], &mut coinbase_nonce); let alice_balance = get_balance(&mut peer, &key_to_stacks_addr(&alice).into()); assert_eq!(alice_balance, 0); } - let tip = get_tip(peer.sortdb.as_ref()); + let tip = get_tip(peer.chain.sortdb.as_ref()); // Extend bob's lockup via `delegate-stack-extend` for 1 more cycle // so that we can check the first-reward-cycle is correctly updated @@ -3046,12 +3068,13 @@ fn pox_3_getters() { let mut latest_block = peer.tenure_with_txs(&[], &mut coinbase_nonce); // Roll to Epoch-2.4 and perform the delegate-stack-extend tests - while get_tip(peer.sortdb.as_ref()).block_height <= epochs[StacksEpochId::Epoch24].start_height + while get_tip(peer.chain.sortdb.as_ref()).block_height + <= epochs[StacksEpochId::Epoch24].start_height { latest_block = peer.tenure_with_txs(&[], &mut coinbase_nonce); } - let tip = get_tip(peer.sortdb.as_ref()); + let tip = get_tip(peer.chain.sortdb.as_ref()); let LOCKUP_AMT = 1024 * POX_THRESHOLD_STEPS_USTX; // alice locks in v2 @@ -3291,7 +3314,7 @@ fn pox_3_getters() { } fn get_burn_pox_addr_info(peer: &mut TestPeer) -> (Vec<PoxAddress>, u128) { - let tip = get_tip(peer.sortdb.as_ref()); + let tip = get_tip(peer.chain.sortdb.as_ref()); let tip_index_block = tip.get_canonical_stacks_block_id(); let burn_height = tip.block_height - 1; let addrs_and_payout = with_sortdb(peer, |ref mut chainstate, ref mut sortdb| { @@ -3377,7 +3400,7 @@ fn get_pox_addrs() { let mut coinbase_nonce = 0; let assert_latest_was_burn = |peer: &mut TestPeer| { - let tip = get_tip(peer.sortdb.as_ref()); + let tip = get_tip(peer.chain.sortdb.as_ref()); let tip_index_block = tip.get_canonical_stacks_block_id(); let burn_height = tip.block_height - 1; @@ -3393,11 +3416,16 @@ fn get_pox_addrs() { assert!(commit.burn_fee > 0); let (addrs, payout) = get_burn_pox_addr_info(peer); - let tip = get_tip(peer.sortdb.as_ref()); + let tip = get_tip(peer.chain.sortdb.as_ref()); let tip_index_block = tip.get_canonical_stacks_block_id(); let burn_height = tip.block_height - 1; info!("Checking burn outputs at burn_height = {}", burn_height); - if peer.config.burnchain.is_in_prepare_phase(burn_height) { + if peer + .config + .chain_config + .burnchain + .is_in_prepare_phase(burn_height) + { assert_eq!(addrs.len(), 1); assert_eq!(payout, 1000); assert!(addrs[0].is_burn()); @@ -3410,7 +3438,7 @@ fn get_pox_addrs() { }; let assert_latest_was_pox = |peer: &mut TestPeer| { - let tip = get_tip(peer.sortdb.as_ref()); + let tip = get_tip(peer.chain.sortdb.as_ref()); let tip_index_block = tip.get_canonical_stacks_block_id(); let burn_height = tip.block_height - 1; @@ -3438,18 +3466,20 @@ fn get_pox_addrs() { }; // produce blocks until epoch 2.2 - while get_tip(peer.sortdb.as_ref()).block_height <= epochs[StacksEpochId::Epoch24].start_height + while get_tip(peer.chain.sortdb.as_ref()).block_height + <= epochs[StacksEpochId::Epoch24].start_height { peer.tenure_with_txs(&[], &mut coinbase_nonce); // if we reach epoch 2.1, perform the check - if get_tip(peer.sortdb.as_ref()).block_height >
epochs[StacksEpochId::Epoch21].start_height + if get_tip(peer.chain.sortdb.as_ref()).block_height + > epochs[StacksEpochId::Epoch21].start_height { assert_latest_was_burn(&mut peer); } } let mut txs = vec![]; - let tip_height = get_tip(peer.sortdb.as_ref()).block_height; + let tip_height = get_tip(peer.chain.sortdb.as_ref()).block_height; let stackers: Vec<_> = keys .iter() .zip([ @@ -3477,7 +3507,7 @@ fn get_pox_addrs() { let target_height = burnchain.reward_cycle_to_block_height(first_v3_cycle); // produce blocks until the first reward phase that everyone should be in - while get_tip(peer.sortdb.as_ref()).block_height < target_height { + while get_tip(peer.chain.sortdb.as_ref()).block_height < target_height { latest_block = peer.tenure_with_txs(&[], &mut coinbase_nonce); assert_latest_was_burn(&mut peer); } @@ -3588,7 +3618,7 @@ fn stack_with_segwit() { let mut coinbase_nonce = 0; let assert_latest_was_burn = |peer: &mut TestPeer| { - let tip = get_tip(peer.sortdb.as_ref()); + let tip = get_tip(peer.chain.sortdb.as_ref()); let tip_index_block = tip.get_canonical_stacks_block_id(); let burn_height = tip.block_height - 1; @@ -3604,11 +3634,16 @@ fn stack_with_segwit() { assert!(commit.burn_fee > 0); let (addrs, payout) = get_burn_pox_addr_info(peer); - let tip = get_tip(peer.sortdb.as_ref()); + let tip = get_tip(peer.chain.sortdb.as_ref()); let tip_index_block = tip.get_canonical_stacks_block_id(); let burn_height = tip.block_height - 1; info!("Checking burn outputs at burn_height = {}", burn_height); - if peer.config.burnchain.is_in_prepare_phase(burn_height) { + if peer + .config + .chain_config + .burnchain + .is_in_prepare_phase(burn_height) + { assert_eq!(addrs.len(), 1); assert_eq!(payout, 1000); assert!(addrs[0].is_burn()); @@ -3621,7 +3656,7 @@ fn stack_with_segwit() { }; let assert_latest_was_pox = |peer: &mut TestPeer| { - let tip = get_tip(peer.sortdb.as_ref()); + let tip = get_tip(peer.chain.sortdb.as_ref()); let tip_index_block = tip.get_canonical_stacks_block_id(); let burn_height = tip.block_height - 1; @@ -3649,18 +3684,20 @@ fn stack_with_segwit() { }; // produce blocks until epoch 2.2 - while get_tip(peer.sortdb.as_ref()).block_height <= epochs[StacksEpochId::Epoch24].start_height + while get_tip(peer.chain.sortdb.as_ref()).block_height + <= epochs[StacksEpochId::Epoch24].start_height { peer.tenure_with_txs(&[], &mut coinbase_nonce); // if we reach epoch 2.1, perform the check - if get_tip(peer.sortdb.as_ref()).block_height > epochs[StacksEpochId::Epoch21].start_height + if get_tip(peer.chain.sortdb.as_ref()).block_height + > epochs[StacksEpochId::Epoch21].start_height { assert_latest_was_burn(&mut peer); } } let mut txs = vec![]; - let tip_height = get_tip(peer.sortdb.as_ref()).block_height; + let tip_height = get_tip(peer.chain.sortdb.as_ref()).block_height; let stackers: Vec<_> = keys .iter() .zip([ @@ -3687,7 +3724,7 @@ fn stack_with_segwit() { let target_height = burnchain.reward_cycle_to_block_height(first_v3_cycle); // produce blocks until the first reward phase that everyone should be in - while get_tip(peer.sortdb.as_ref()).block_height < target_height { + while get_tip(peer.chain.sortdb.as_ref()).block_height < target_height { latest_block = peer.tenure_with_txs(&[], &mut coinbase_nonce); assert_latest_was_burn(&mut peer); } @@ -3830,12 +3867,13 @@ fn stack_aggregation_increase() { let mut latest_block = peer.tenure_with_txs(&[], &mut coinbase_nonce); // Roll to Epoch-2.4 and perform the delegate-stack-extend tests - while 
get_tip(peer.sortdb.as_ref()).block_height <= epochs[StacksEpochId::Epoch24].start_height + while get_tip(peer.chain.sortdb.as_ref()).block_height + <= epochs[StacksEpochId::Epoch24].start_height { latest_block = peer.tenure_with_txs(&[], &mut coinbase_nonce); } - let tip = get_tip(peer.sortdb.as_ref()); + let tip = get_tip(peer.chain.sortdb.as_ref()); // submit delegation tx for alice let alice_delegation_1 = make_pox_3_contract_call( @@ -3898,7 +3936,7 @@ fn stack_aggregation_increase() { // this is one block after the reward cycle starts let height_target = burnchain.reward_cycle_to_block_height(first_v3_cycle + 3) + 1; - while get_tip(peer.sortdb.as_ref()).block_height < height_target { + while get_tip(peer.chain.sortdb.as_ref()).block_height < height_target { latest_block = peer.tenure_with_txs(&[], &mut coinbase_nonce); } @@ -3926,7 +3964,7 @@ fn stack_aggregation_increase() { assert_eq!(partial_stacked, 512 * POX_THRESHOLD_STEPS_USTX); } - let tip = get_tip(peer.sortdb.as_ref()); + let tip = get_tip(peer.chain.sortdb.as_ref()); let cur_reward_cycle = burnchain .block_height_to_reward_cycle(tip.block_height) .unwrap(); @@ -3977,7 +4015,7 @@ fn stack_aggregation_increase() { bob_nonce += 1; latest_block = peer.tenure_with_txs(&txs_to_submit, &mut coinbase_nonce); - let tip = get_tip(peer.sortdb.as_ref()); + let tip = get_tip(peer.chain.sortdb.as_ref()); let cur_reward_cycle = burnchain .block_height_to_reward_cycle(tip.block_height) .unwrap(); @@ -4245,12 +4283,13 @@ fn pox_3_delegate_stx_addr_validation() { let mut latest_block = peer.tenure_with_txs(&[], &mut coinbase_nonce); // Roll to Epoch-2.4 and perform the delegate-stack-extend tests - while get_tip(peer.sortdb.as_ref()).block_height <= epochs[StacksEpochId::Epoch24].start_height + while get_tip(peer.chain.sortdb.as_ref()).block_height + <= epochs[StacksEpochId::Epoch24].start_height { latest_block = peer.tenure_with_txs(&[], &mut coinbase_nonce); } - let tip = get_tip(peer.sortdb.as_ref()); + let tip = get_tip(peer.chain.sortdb.as_ref()); let cur_reward_cycle = burnchain .block_height_to_reward_cycle(tip.block_height) .unwrap(); diff --git a/stackslib/src/chainstate/stacks/boot/pox_4_tests.rs b/stackslib/src/chainstate/stacks/boot/pox_4_tests.rs index f26a5651d4f..90ac2d21e5c 100644 --- a/stackslib/src/chainstate/stacks/boot/pox_4_tests.rs +++ b/stackslib/src/chainstate/stacks/boot/pox_4_tests.rs @@ -46,6 +46,7 @@ use crate::chainstate::stacks::boot::signers_tests::get_signer_index; use crate::chainstate::stacks::boot::{PoxVersions, MINERS_NAME}; use crate::chainstate::stacks::events::{StacksTransactionReceipt, TransactionOrigin}; use crate::chainstate::stacks::*; +use crate::chainstate::tests::TestChainstateConfig; use crate::core::*; use crate::net::test::{TestEventObserver, TestEventObserverBlock, TestPeer, TestPeerConfig}; use crate::net::tests::NakamotoBootPlan; @@ -79,9 +80,10 @@ fn make_simple_pox_4_lock( let addr = key_to_stacks_addr(key); let pox_addr = PoxAddress::from_legacy(AddressHashMode::SerializeP2PKH, addr.bytes().clone()); let signer_pk = StacksPublicKey::from_private(key); - let tip = get_tip(peer.sortdb.as_ref()); + let tip = get_tip(peer.chain.sortdb.as_ref()); let next_reward_cycle = peer .config + .chain_config .burnchain .block_height_to_reward_cycle(tip.block_height) .unwrap(); @@ -369,7 +371,7 @@ fn pox_extend_transition() { assert_eq!(alice_account.stx_balance.unlock_height(), 0); // next tenure include Alice's lockup - let tip = get_tip(peer.sortdb.as_ref()); + let tip = 
get_tip(peer.chain.sortdb.as_ref()); let alice_lockup = make_pox_lockup( &alice, 0, @@ -416,12 +418,14 @@ fn pox_extend_transition() { let alice_balance = get_balance(&mut peer, &key_to_stacks_addr(&alice).into()); assert_eq!(alice_balance, 0); - while get_tip(peer.sortdb.as_ref()).block_height < height_target { + while get_tip(peer.chain.sortdb.as_ref()).block_height < height_target { latest_block = peer.tenure_with_txs(&[], &mut coinbase_nonce); } // produce blocks until epoch 2.1 - while get_tip(peer.sortdb.as_ref()).block_height < epochs[StacksEpochId::Epoch21].start_height { + while get_tip(peer.chain.sortdb.as_ref()).block_height + < epochs[StacksEpochId::Epoch21].start_height + { peer.tenure_with_txs(&[], &mut coinbase_nonce); alice_rewards_to_v2_start_checks(latest_block.clone(), &mut peer); } @@ -430,7 +434,7 @@ fn pox_extend_transition() { // Lets have Bob lock up for v2 // this will lock for cycles 8, 9, 10 // the first v2 cycle will be 8 - let tip = get_tip(peer.sortdb.as_ref()); + let tip = get_tip(peer.chain.sortdb.as_ref()); let bob_lockup = make_pox_2_lockup( &bob, @@ -475,7 +479,7 @@ fn pox_extend_transition() { // produce blocks until the v2 reward cycles start let height_target = burnchain.reward_cycle_to_block_height(first_v2_cycle) - 1; - while get_tip(peer.sortdb.as_ref()).block_height < height_target { + while get_tip(peer.chain.sortdb.as_ref()).block_height < height_target { latest_block = peer.tenure_with_txs(&[], &mut coinbase_nonce); // alice is still locked, balance should be 0 let alice_balance = get_balance(&mut peer, &key_to_stacks_addr(&alice).into()); @@ -488,7 +492,9 @@ fn pox_extend_transition() { v2_rewards_checks(latest_block, &mut peer); // roll the chain forward until just before Epoch-2.2 - while get_tip(peer.sortdb.as_ref()).block_height < epochs[StacksEpochId::Epoch22].start_height { + while get_tip(peer.chain.sortdb.as_ref()).block_height + < epochs[StacksEpochId::Epoch22].start_height + { latest_block = peer.tenure_with_txs(&[], &mut coinbase_nonce); // at this point, alice's balance should be locked, and so should bob's let alice_balance = get_balance(&mut peer, &key_to_stacks_addr(&alice).into()); @@ -515,13 +521,13 @@ fn pox_extend_transition() { assert_eq!(bob_account.amount_unlocked(), INITIAL_BALANCE); // Roll to pox4 activation and re-do the above stack-extend tests - while get_tip(peer.sortdb.as_ref()).block_height + while get_tip(peer.chain.sortdb.as_ref()).block_height < u64::from(burnchain.pox_constants.pox_4_activation_height) { latest_block = peer.tenure_with_txs(&[], &mut coinbase_nonce); } - let tip = get_tip(peer.sortdb.as_ref()); + let tip = get_tip(peer.chain.sortdb.as_ref()); let alice_signer_private = Secp256k1PrivateKey::random(); let alice_signer_key = Secp256k1PublicKey::from_private(&alice_signer_private); @@ -569,7 +575,7 @@ fn pox_extend_transition() { info!( "Block height: {}", - get_tip(peer.sortdb.as_ref()).block_height + get_tip(peer.chain.sortdb.as_ref()).block_height ); // check that the "raw" reward set will contain entries for alice at the cycle start @@ -598,7 +604,7 @@ fn pox_extend_transition() { assert_eq!(alice_balance, 0); // advance to the first v3 reward cycle - while get_tip(peer.sortdb.as_ref()).block_height < height_target { + while get_tip(peer.chain.sortdb.as_ref()).block_height < height_target { latest_block = peer.tenure_with_txs(&[], &mut coinbase_nonce); } @@ -621,7 +627,7 @@ fn pox_extend_transition() { 2, ); - let tip = get_tip(peer.sortdb.as_ref()); + let tip = 
get_tip(peer.chain.sortdb.as_ref()); let bob_lockup = make_pox_4_lockup( &bob, 2, @@ -818,7 +824,7 @@ fn pox_extend_transition() { } fn get_burn_pox_addr_info(peer: &mut TestPeer) -> (Vec<PoxAddress>, u128) { - let tip = get_tip(peer.sortdb.as_ref()); + let tip = get_tip(peer.chain.sortdb.as_ref()); let tip_index_block = tip.get_canonical_stacks_block_id(); let burn_height = tip.block_height - 1; let addrs_and_payout = with_sortdb(peer, |ref mut chainstate, ref mut sortdb| { @@ -900,10 +906,11 @@ fn pox_lock_unlock() { // Advance into pox4 let target_height = burnchain.pox_constants.pox_4_activation_height; // produce blocks until the first reward phase that everyone should be in - while get_tip(peer.sortdb.as_ref()).block_height < u64::from(target_height) { + while get_tip(peer.chain.sortdb.as_ref()).block_height < u64::from(target_height) { latest_block = Some(peer.tenure_with_txs(&[], &mut coinbase_nonce)); // if we reach epoch 2.1, perform the check - if get_tip(peer.sortdb.as_ref()).block_height > epochs[StacksEpochId::Epoch21].start_height + if get_tip(peer.chain.sortdb.as_ref()).block_height + > epochs[StacksEpochId::Epoch21].start_height { assert_latest_was_burn(&mut peer); } @@ -911,11 +918,11 @@ fn pox_lock_unlock() { info!( "Block height: {}", - get_tip(peer.sortdb.as_ref()).block_height + get_tip(peer.chain.sortdb.as_ref()).block_height ); let mut txs = vec![]; - let tip_height = get_tip(peer.sortdb.as_ref()).block_height; + let tip_height = get_tip(peer.chain.sortdb.as_ref()).block_height; let reward_cycle = burnchain.block_height_to_reward_cycle(tip_height).unwrap() as u128; let stackers: Vec<_> = keys .iter() @@ -960,13 +967,13 @@ fn pox_lock_unlock() { // Advance to start of rewards cycle stackers are participating in let target_height = burnchain.pox_constants.pox_4_activation_height + 5; - while get_tip(peer.sortdb.as_ref()).block_height < u64::from(target_height) { + while get_tip(peer.chain.sortdb.as_ref()).block_height < u64::from(target_height) { latest_block = peer.tenure_with_txs(&[], &mut coinbase_nonce); } info!( "Block height: {}", - get_tip(peer.sortdb.as_ref()).block_height + get_tip(peer.chain.sortdb.as_ref()).block_height ); // now we should be in the reward phase, produce the reward blocks @@ -976,7 +983,7 @@ fn pox_lock_unlock() { // Check that STX are locked for 2 reward cycles for _ in 0..lock_period { - let tip = get_tip(peer.sortdb.as_ref()); + let tip = get_tip(peer.chain.sortdb.as_ref()); let cycle = burnchain .block_height_to_reward_cycle(tip.block_height) .unwrap(); @@ -1081,10 +1088,11 @@ fn pox_3_defunct() { // Advance into pox4 let target_height = burnchain.pox_constants.pox_4_activation_height; // produce blocks until the first reward phase that everyone should be in - while get_tip(peer.sortdb.as_ref()).block_height < u64::from(target_height) { + while get_tip(peer.chain.sortdb.as_ref()).block_height < u64::from(target_height) { latest_block = peer.tenure_with_txs(&[], &mut coinbase_nonce); // if we reach epoch 2.1, perform the check - if get_tip(peer.sortdb.as_ref()).block_height > epochs[StacksEpochId::Epoch21].start_height + if get_tip(peer.chain.sortdb.as_ref()).block_height + > epochs[StacksEpochId::Epoch21].start_height { assert_latest_was_burn(&mut peer); } @@ -1092,11 +1100,11 @@ fn pox_3_defunct() { info!( "Block height: {}", - get_tip(peer.sortdb.as_ref()).block_height + get_tip(peer.chain.sortdb.as_ref()).block_height ); let mut txs = vec![]; - let tip_height = get_tip(peer.sortdb.as_ref()).block_height; + let tip_height =
get_tip(peer.chain.sortdb.as_ref()).block_height; let stackers: Vec<_> = keys .iter() .zip([ @@ -1153,13 +1161,13 @@ fn pox_3_defunct() { // Advance to start of rewards cycle stackers are participating in let target_height = burnchain.pox_constants.pox_4_activation_height + 5; - while get_tip(peer.sortdb.as_ref()).block_height < u64::from(target_height) { + while get_tip(peer.chain.sortdb.as_ref()).block_height < u64::from(target_height) { latest_block = peer.tenure_with_txs(&[], &mut coinbase_nonce); } info!( "Block height: {}", - get_tip(peer.sortdb.as_ref()).block_height + get_tip(peer.chain.sortdb.as_ref()).block_height ); // now we should be in the reward phase, produce the reward blocks @@ -1168,7 +1176,7 @@ fn pox_3_defunct() { // Check next 3 reward cycles for _ in 0..=lock_period { - let tip = get_tip(peer.sortdb.as_ref()); + let tip = get_tip(peer.chain.sortdb.as_ref()); let cycle = burnchain .block_height_to_reward_cycle(tip.block_height) .unwrap(); @@ -1211,10 +1219,11 @@ fn pox_3_unlocks() { // Advance to a few blocks before pox 3 unlock let target_height = burnchain.pox_constants.v3_unlock_height - 14; // produce blocks until the first reward phase that everyone should be in - while get_tip(peer.sortdb.as_ref()).block_height < u64::from(target_height) { + while get_tip(peer.chain.sortdb.as_ref()).block_height < u64::from(target_height) { latest_block = peer.tenure_with_txs(&[], &mut coinbase_nonce); // if we reach epoch 2.1, perform the check - if get_tip(peer.sortdb.as_ref()).block_height > epochs[StacksEpochId::Epoch21].start_height + if get_tip(peer.chain.sortdb.as_ref()).block_height + > epochs[StacksEpochId::Epoch21].start_height { assert_latest_was_burn(&mut peer); } @@ -1222,11 +1231,11 @@ fn pox_3_unlocks() { info!( "Block height: {}", - get_tip(peer.sortdb.as_ref()).block_height + get_tip(peer.chain.sortdb.as_ref()).block_height ); let mut txs = vec![]; - let tip_height = get_tip(peer.sortdb.as_ref()).block_height; + let tip_height = get_tip(peer.chain.sortdb.as_ref()).block_height; let stackers: Vec<_> = keys .iter() .zip([ @@ -1264,7 +1273,7 @@ fn pox_3_unlocks() { // Check that STX are locked for 2 reward cycles for _ in 0..2 { - let tip = get_tip(peer.sortdb.as_ref()); + let tip = get_tip(peer.chain.sortdb.as_ref()); let cycle = burnchain .block_height_to_reward_cycle(tip.block_height) .unwrap(); @@ -1308,18 +1317,18 @@ fn pox_3_unlocks() { // Advance to v3 unlock let target_height = burnchain.pox_constants.v3_unlock_height; - while get_tip(peer.sortdb.as_ref()).block_height < u64::from(target_height) { + while get_tip(peer.chain.sortdb.as_ref()).block_height < u64::from(target_height) { latest_block = peer.tenure_with_txs(&[], &mut coinbase_nonce); } info!( "Block height: {}", - get_tip(peer.sortdb.as_ref()).block_height + get_tip(peer.chain.sortdb.as_ref()).block_height ); // Check that STX are not locked for 3 reward cycles after pox4 starts for _ in 0..3 { - let tip = get_tip(peer.sortdb.as_ref()); + let tip = get_tip(peer.chain.sortdb.as_ref()); let cycle = burnchain .block_height_to_reward_cycle(tip.block_height) .unwrap(); @@ -1396,7 +1405,7 @@ fn pox_4_check_cycle_id_range_in_print_events_pool() { // Advance into pox4 let target_height = burnchain.pox_constants.pox_4_activation_height; // produce blocks until the first reward phase that everyone should be in - while get_tip(peer.sortdb.as_ref()).block_height < u64::from(target_height) { + while get_tip(peer.chain.sortdb.as_ref()).block_height < u64::from(target_height) { latest_block = 
Some(peer.tenure_with_txs(&[], &mut coinbase_nonce)); } @@ -1405,11 +1414,11 @@ fn pox_4_check_cycle_id_range_in_print_events_pool() { info!( "Block height: {}", - get_tip(peer.sortdb.as_ref()).block_height + get_tip(peer.chain.sortdb.as_ref()).block_height ); let lock_period = 1; - let block_height = get_tip(peer.sortdb.as_ref()).block_height; + let block_height = get_tip(peer.chain.sortdb.as_ref()).block_height; let min_ustx = get_stacking_minimum(&mut peer, &latest_block.unwrap()); // stack-stx @@ -1483,7 +1492,7 @@ fn pox_4_check_cycle_id_range_in_print_events_pool() { steph_nonce += 1; // alice delegates STX to bob - let target_height = get_tip(peer.sortdb.as_ref()).block_height + let target_height = get_tip(peer.chain.sortdb.as_ref()).block_height + (3 * pox_constants.reward_cycle_length as u64) // 3 cycles (next cycle + 2) + 1; // additional few blocks shouldn't matter to unlock-cycle let alice_delegate = make_pox_4_delegate_stx( @@ -1497,7 +1506,7 @@ fn pox_4_check_cycle_id_range_in_print_events_pool() { let alice_delegate_nonce = alice_nonce; alice_nonce += 1; - let curr_height = get_tip(peer.sortdb.as_ref()).block_height; + let curr_height = get_tip(peer.chain.sortdb.as_ref()).block_height; let bob_delegate_stack_nonce = bob_nonce; let bob_delegate_stack = make_pox_4_delegate_stack_stx( &bob, @@ -1544,7 +1553,7 @@ fn pox_4_check_cycle_id_range_in_print_events_pool() { &mut coinbase_nonce, )); - let tip = get_tip(peer.sortdb.as_ref()); + let tip = get_tip(peer.chain.sortdb.as_ref()); let tipId = StacksBlockId::new(&tip.consensus_hash, &tip.canonical_stacks_tip_hash); assert_eq!(tipId, latest_block.unwrap()); @@ -1784,11 +1793,11 @@ fn pox_4_check_cycle_id_range_in_print_events_pool_in_prepare_phase() { // Advance into pox4 let target_height = burnchain.pox_constants.pox_4_activation_height; // produce blocks until the first reward phase that everyone should be in - while get_tip(peer.sortdb.as_ref()).block_height < u64::from(target_height) { + while get_tip(peer.chain.sortdb.as_ref()).block_height < u64::from(target_height) { latest_block = Some(peer.tenure_with_txs(&[], &mut coinbase_nonce)); } // produce blocks until the we're in the prepare phase (first block of prepare-phase was mined, i.e. 
pox-set for next cycle determined) - while !burnchain.is_in_prepare_phase(get_tip(peer.sortdb.as_ref()).block_height) { + while !burnchain.is_in_prepare_phase(get_tip(peer.chain.sortdb.as_ref()).block_height) { latest_block = Some(peer.tenure_with_txs(&[], &mut coinbase_nonce)); } @@ -1797,11 +1806,11 @@ fn pox_4_check_cycle_id_range_in_print_events_pool_in_prepare_phase() { info!( "Block height: {}", - get_tip(peer.sortdb.as_ref()).block_height, + get_tip(peer.chain.sortdb.as_ref()).block_height, ); let lock_period = 1; - let block_height = get_tip(peer.sortdb.as_ref()).block_height; + let block_height = get_tip(peer.chain.sortdb.as_ref()).block_height; let min_ustx = get_stacking_minimum(&mut peer, &latest_block.unwrap()); // stack-stx @@ -1875,7 +1884,7 @@ fn pox_4_check_cycle_id_range_in_print_events_pool_in_prepare_phase() { steph_nonce += 1; // alice delegates STX to bob - let target_height = get_tip(peer.sortdb.as_ref()).block_height + let target_height = get_tip(peer.chain.sortdb.as_ref()).block_height + (3 * pox_constants.reward_cycle_length as u64) // 3 cycles (next cycle + 2) + 1; // additional few blocks shouldn't matter to unlock-cycle let alice_delegate = make_pox_4_delegate_stx( @@ -1889,7 +1898,7 @@ fn pox_4_check_cycle_id_range_in_print_events_pool_in_prepare_phase() { let alice_delegate_nonce = alice_nonce; alice_nonce += 1; - let curr_height = get_tip(peer.sortdb.as_ref()).block_height; + let curr_height = get_tip(peer.chain.sortdb.as_ref()).block_height; let bob_delegate_stack_nonce = bob_nonce; let bob_delegate_stack = make_pox_4_delegate_stack_stx( &bob, @@ -1936,7 +1945,7 @@ fn pox_4_check_cycle_id_range_in_print_events_pool_in_prepare_phase() { &mut coinbase_nonce, )); - let tip = get_tip(peer.sortdb.as_ref()); + let tip = get_tip(peer.chain.sortdb.as_ref()); let tipId = StacksBlockId::new(&tip.consensus_hash, &tip.canonical_stacks_tip_hash); assert_eq!(tipId, latest_block.clone().unwrap()); @@ -2215,11 +2224,11 @@ fn pox_4_check_cycle_id_range_in_print_events_pool_in_prepare_phase_skip_cycle() // Advance into pox4 let target_height = burnchain.pox_constants.pox_4_activation_height; // produce blocks until the first reward phase that everyone should be in - while get_tip(peer.sortdb.as_ref()).block_height < u64::from(target_height) { + while get_tip(peer.chain.sortdb.as_ref()).block_height < u64::from(target_height) { latest_block = Some(peer.tenure_with_txs(&[], &mut coinbase_nonce)); } // produce blocks until the we're in the prepare phase (first block of prepare-phase was mined, i.e. 
pox-set for next cycle determined) - while !burnchain.is_in_prepare_phase(get_tip(peer.sortdb.as_ref()).block_height) { + while !burnchain.is_in_prepare_phase(get_tip(peer.chain.sortdb.as_ref()).block_height) { latest_block = Some(peer.tenure_with_txs(&[], &mut coinbase_nonce)); } @@ -2228,15 +2237,15 @@ fn pox_4_check_cycle_id_range_in_print_events_pool_in_prepare_phase_skip_cycle() info!( "Block height: {}", - get_tip(peer.sortdb.as_ref()).block_height + get_tip(peer.chain.sortdb.as_ref()).block_height ); let lock_period = 2; - let block_height = get_tip(peer.sortdb.as_ref()).block_height; + let block_height = get_tip(peer.chain.sortdb.as_ref()).block_height; let min_ustx = get_stacking_minimum(&mut peer, &latest_block.unwrap()); // alice delegates STX to bob - let target_height = get_tip(peer.sortdb.as_ref()).block_height + let target_height = get_tip(peer.chain.sortdb.as_ref()).block_height + (3 * pox_constants.reward_cycle_length as u64) // 3 cycles (next cycle + 2) + 1; // additional few blocks shouldn't matter to unlock-cycle let alice_delegate = make_pox_4_delegate_stx( @@ -2250,7 +2259,7 @@ fn pox_4_check_cycle_id_range_in_print_events_pool_in_prepare_phase_skip_cycle() let alice_delegate_nonce = alice_nonce; alice_nonce += 1; - let curr_height = get_tip(peer.sortdb.as_ref()).block_height; + let curr_height = get_tip(peer.chain.sortdb.as_ref()).block_height; let bob_delegate_stack_nonce = bob_nonce; let bob_delegate_stack = make_pox_4_delegate_stack_stx( &bob, @@ -2291,7 +2300,7 @@ fn pox_4_check_cycle_id_range_in_print_events_pool_in_prepare_phase_skip_cycle() &mut coinbase_nonce, )); - let tip = get_tip(peer.sortdb.as_ref()); + let tip = get_tip(peer.chain.sortdb.as_ref()); let tipId = StacksBlockId::new(&tip.consensus_hash, &tip.canonical_stacks_tip_hash); assert_eq!(tipId, latest_block.unwrap()); @@ -2440,11 +2449,11 @@ fn pox_4_check_cycle_id_range_in_print_events_before_prepare_phase() { // Advance into pox4 let target_height = burnchain.pox_constants.pox_4_activation_height; // produce blocks until the first reward phase that everyone should be in - while get_tip(peer.sortdb.as_ref()).block_height < u64::from(target_height) { + while get_tip(peer.chain.sortdb.as_ref()).block_height < u64::from(target_height) { latest_block = Some(peer.tenure_with_txs(&[], &mut coinbase_nonce)); } // produce blocks until the we're 1 before the prepare phase (first block of prepare-phase not yet mined, whatever txs we create now won't be included in the reward set) - while !burnchain.is_in_prepare_phase(get_tip(peer.sortdb.as_ref()).block_height + 1) { + while !burnchain.is_in_prepare_phase(get_tip(peer.chain.sortdb.as_ref()).block_height + 1) { latest_block = Some(peer.tenure_with_txs(&[], &mut coinbase_nonce)); } @@ -2452,7 +2461,7 @@ fn pox_4_check_cycle_id_range_in_print_events_before_prepare_phase() { info!( "Block height: {}", - get_tip(peer.sortdb.as_ref()).block_height + get_tip(peer.chain.sortdb.as_ref()).block_height ); let min_ustx = get_stacking_minimum(&mut peer, &latest_block.unwrap()) * 120 / 100; // * 1.2 @@ -2477,7 +2486,7 @@ fn pox_4_check_cycle_id_range_in_print_events_before_prepare_phase() { &steph_pox_addr, steph_lock_period, &steph_signing_key, - get_tip(peer.sortdb.as_ref()).block_height, + get_tip(peer.chain.sortdb.as_ref()).block_height, Some(signature), u128::MAX, 1, @@ -2560,11 +2569,11 @@ fn pox_4_check_cycle_id_range_in_print_events_in_prepare_phase() { // Advance into pox4 let target_height = burnchain.pox_constants.pox_4_activation_height; // produce blocks 
until the first reward phase that everyone should be in - while get_tip(peer.sortdb.as_ref()).block_height < u64::from(target_height) { + while get_tip(peer.chain.sortdb.as_ref()).block_height < u64::from(target_height) { latest_block = Some(peer.tenure_with_txs(&[], &mut coinbase_nonce)); } // produce blocks until the we're in the prepare phase (first block of prepare-phase was mined, i.e. pox-set for next cycle determined) - while !burnchain.is_in_prepare_phase(get_tip(peer.sortdb.as_ref()).block_height) { + while !burnchain.is_in_prepare_phase(get_tip(peer.chain.sortdb.as_ref()).block_height) { latest_block = Some(peer.tenure_with_txs(&[], &mut coinbase_nonce)); } @@ -2572,7 +2581,7 @@ fn pox_4_check_cycle_id_range_in_print_events_in_prepare_phase() { info!( "Block height: {}", - get_tip(peer.sortdb.as_ref()).block_height + get_tip(peer.chain.sortdb.as_ref()).block_height ); let min_ustx = get_stacking_minimum(&mut peer, &latest_block.unwrap()) * 120 / 100; // * 1.2 @@ -2597,7 +2606,7 @@ fn pox_4_check_cycle_id_range_in_print_events_in_prepare_phase() { &steph_pox_addr, steph_lock_period, &steph_signing_key, - get_tip(peer.sortdb.as_ref()).block_height, + get_tip(peer.chain.sortdb.as_ref()).block_height, Some(signature), u128::MAX, 1, @@ -2681,7 +2690,7 @@ fn pox_4_delegate_stack_increase_events() { // Advance into pox4 let target_height = burnchain.pox_constants.pox_4_activation_height; // produce blocks until the first reward phase that everyone should be in - while get_tip(peer.sortdb.as_ref()).block_height < u64::from(target_height) { + while get_tip(peer.chain.sortdb.as_ref()).block_height < u64::from(target_height) { latest_block = Some(peer.tenure_with_txs(&[], &mut coinbase_nonce)); } @@ -2697,7 +2706,7 @@ fn pox_4_delegate_stack_increase_events() { alice_principal.clone(), amount / 2, bob_pox_addr.clone(), - get_tip(peer.sortdb.as_ref()).block_height as u128, + get_tip(peer.chain.sortdb.as_ref()).block_height as u128, 2, ); @@ -2791,15 +2800,15 @@ fn pox_4_revoke_delegate_stx_events() { // Advance into pox4 let target_height = burnchain.pox_constants.pox_4_activation_height; // produce blocks until the first reward phase that everyone should be in - while get_tip(peer.sortdb.as_ref()).block_height < u64::from(target_height) { + while get_tip(peer.chain.sortdb.as_ref()).block_height < u64::from(target_height) { latest_block = Some(peer.tenure_with_txs(&[], &mut coinbase_nonce)); } info!( "Block height: {}", - get_tip(peer.sortdb.as_ref()).block_height + get_tip(peer.chain.sortdb.as_ref()).block_height ); - let block_height = get_tip(peer.sortdb.as_ref()).block_height; + let block_height = get_tip(peer.chain.sortdb.as_ref()).block_height; let current_cycle = get_current_reward_cycle(&peer, &burnchain); let next_cycle = current_cycle + 1; let min_ustx = get_stacking_minimum(&mut peer, &latest_block.unwrap()); @@ -2845,7 +2854,7 @@ fn pox_4_revoke_delegate_stx_events() { // check delegate with expiry - let target_height = get_tip(peer.sortdb.as_ref()).block_height + 10; + let target_height = get_tip(peer.chain.sortdb.as_ref()).block_height + 10; let alice_delegate_2 = make_pox_4_delegate_stx( &alice, alice_nonce, @@ -2860,7 +2869,7 @@ fn pox_4_revoke_delegate_stx_events() { peer.tenure_with_txs(&[alice_delegate_2], &mut coinbase_nonce); // produce blocks until delegation expired - while get_tip(peer.sortdb.as_ref()).block_height <= target_height { + while get_tip(peer.chain.sortdb.as_ref()).block_height <= target_height { peer.tenure_with_txs(&[], &mut coinbase_nonce); } @@ -3015,7 
+3024,7 @@ fn verify_signer_key_signatures() { // Advance into pox4 let target_height = burnchain.pox_constants.pox_4_activation_height; // produce blocks until the first reward phase that everyone should be in - while get_tip(peer.sortdb.as_ref()).block_height < u64::from(target_height) { + while get_tip(peer.chain.sortdb.as_ref()).block_height < u64::from(target_height) { latest_block = peer.tenure_with_txs(&[], &mut coinbase_nonce); } @@ -4297,31 +4306,74 @@ fn stack_agg_increase() { ) .unwrap(); - peer_config.aggregate_public_key = Some(aggregate_public_key); + peer_config.chain_config.aggregate_public_key = Some(aggregate_public_key); peer_config .stacker_dbs .push(boot_code_id(MINERS_NAME, false)); - peer_config.epochs = Some(StacksEpoch::unit_test_3_0_only(1000)); // Let us not activate nakamoto to make life easier - peer_config.initial_balances = vec![(addr.to_account_principal(), 1_000_000_000_000_000_000)]; - peer_config.initial_balances.append(&mut initial_balances); - peer_config.burnchain.pox_constants.v2_unlock_height = 81; - peer_config.burnchain.pox_constants.pox_3_activation_height = 101; - peer_config.burnchain.pox_constants.v3_unlock_height = 102; - peer_config.burnchain.pox_constants.pox_4_activation_height = 105; - peer_config.test_signers = Some(test_signers); - peer_config.burnchain.pox_constants.reward_cycle_length = 20; - peer_config.burnchain.pox_constants.prepare_length = 5; - let epochs = peer_config.epochs.clone().unwrap(); + peer_config.chain_config.epochs = Some(StacksEpoch::unit_test_3_0_only(1000)); // Let us not activate nakamoto to make life easier + peer_config.chain_config.initial_balances = + vec![(addr.to_account_principal(), 1_000_000_000_000_000_000)]; + peer_config + .chain_config + .initial_balances + .append(&mut initial_balances); + peer_config + .chain_config + .burnchain + .pox_constants + .v2_unlock_height = 81; + peer_config + .chain_config + .burnchain + .pox_constants + .pox_3_activation_height = 101; + peer_config + .chain_config + .burnchain + .pox_constants + .v3_unlock_height = 102; + peer_config + .chain_config + .burnchain + .pox_constants + .pox_4_activation_height = 105; + peer_config.chain_config.test_signers = Some(test_signers); + peer_config + .chain_config + .burnchain + .pox_constants + .reward_cycle_length = 20; + peer_config + .chain_config + .burnchain + .pox_constants + .prepare_length = 5; + let epochs = peer_config.chain_config.epochs.clone().unwrap(); let epoch_3 = &epochs[StacksEpochId::Epoch30]; let mut peer = TestPeer::new_with_observer(peer_config, Some(&observer)); let mut peer_nonce = 0; // Set constants - let reward_cycle_len = peer.config.burnchain.pox_constants.reward_cycle_length; - let prepare_phase_len = peer.config.burnchain.pox_constants.prepare_length; + let reward_cycle_len = peer + .config + .chain_config + .burnchain + .pox_constants + .reward_cycle_length; + let prepare_phase_len = peer + .config + .chain_config + .burnchain + .pox_constants + .prepare_length; // Advance into pox4 - let mut target_height = peer.config.burnchain.pox_constants.pox_4_activation_height; + let mut target_height = peer + .config + .chain_config + .burnchain + .pox_constants + .pox_4_activation_height; let mut latest_block = None; // Produce blocks until the first reward phase that everyone should be in while peer.get_burn_block_height() < u64::from(target_height) { @@ -4329,7 +4381,7 @@ fn stack_agg_increase() { } let latest_block = latest_block.expect("Failed to get tip"); // Current reward cycle: 5 (starts at burn block 
101) - let reward_cycle = get_current_reward_cycle(&peer, &peer.config.burnchain); + let reward_cycle = get_current_reward_cycle(&peer, &peer.config.chain_config.burnchain); let next_reward_cycle = reward_cycle.wrapping_add(1); // Current burn block height: 105 let burn_block_height = peer.get_burn_block_height(); @@ -5089,7 +5141,7 @@ fn stack_increase_different_signer_keys(use_nakamoto: bool) { } pub fn assert_latest_was_burn(peer: &mut TestPeer) { - let tip = get_tip(peer.sortdb.as_ref()); + let tip = get_tip(peer.chain.sortdb.as_ref()); let tip_index_block = tip.get_canonical_stacks_block_id(); let burn_height = tip.block_height - 1; @@ -5105,11 +5157,16 @@ pub fn assert_latest_was_burn(peer: &mut TestPeer) { assert!(commit.burn_fee > 0); let (addrs, payout) = get_burn_pox_addr_info(peer); - let tip = get_tip(peer.sortdb.as_ref()); + let tip = get_tip(peer.chain.sortdb.as_ref()); let tip_index_block = tip.get_canonical_stacks_block_id(); let burn_height = tip.block_height - 1; info!("Checking burn outputs at burn_height = {burn_height}"); - if peer.config.burnchain.is_in_prepare_phase(burn_height) { + if peer + .config + .chain_config + .burnchain + .is_in_prepare_phase(burn_height) + { assert_eq!(addrs.len(), 1); assert_eq!(payout, 1000); assert!(addrs[0].is_burn()); @@ -5122,7 +5179,7 @@ pub fn assert_latest_was_burn(peer: &mut TestPeer) { } fn assert_latest_was_pox(peer: &mut TestPeer) -> Vec<PoxAddress> { - let tip = get_tip(peer.sortdb.as_ref()); + let tip = get_tip(peer.chain.sortdb.as_ref()); let tip_index_block = tip.get_canonical_stacks_block_id(); let burn_height = tip.block_height - 1; @@ -6648,31 +6705,72 @@ pub fn pox_4_scenario_test_setup<'a>( ) .unwrap(); - peer_config.aggregate_public_key = Some(aggregate_public_key); + peer_config.chain_config.aggregate_public_key = Some(aggregate_public_key); peer_config .stacker_dbs .push(boot_code_id(MINERS_NAME, false)); - peer_config.epochs = Some(StacksEpoch::unit_test_3_0_only(1000)); - peer_config.initial_balances = vec![(addr.to_account_principal(), 1_000_000_000_000_000_000)]; + peer_config.chain_config.epochs = Some(StacksEpoch::unit_test_3_0_only(1000)); + peer_config.chain_config.initial_balances = + vec![(addr.to_account_principal(), 1_000_000_000_000_000_000)]; peer_config + .chain_config .initial_balances .extend_from_slice(&initial_balances); - peer_config.burnchain.pox_constants.v2_unlock_height = 81; - peer_config.burnchain.pox_constants.pox_3_activation_height = 101; - peer_config.burnchain.pox_constants.v3_unlock_height = 102; - peer_config.burnchain.pox_constants.pox_4_activation_height = 105; - peer_config.test_signers = Some(test_signers); - peer_config.burnchain.pox_constants.reward_cycle_length = 20; - peer_config.burnchain.pox_constants.prepare_length = 5; + peer_config + .chain_config + .burnchain + .pox_constants + .v2_unlock_height = 81; + peer_config + .chain_config + .burnchain + .pox_constants + .pox_3_activation_height = 101; + peer_config + .chain_config + .burnchain + .pox_constants + .v3_unlock_height = 102; + peer_config + .chain_config + .burnchain + .pox_constants + .pox_4_activation_height = 105; + peer_config.chain_config.test_signers = Some(test_signers); + peer_config + .chain_config + .burnchain + .pox_constants + .reward_cycle_length = 20; + peer_config + .chain_config + .burnchain + .pox_constants + .prepare_length = 5; let mut peer = TestPeer::new_with_observer(peer_config.clone(), Some(observer)); let mut peer_nonce = 0; - let reward_cycle_len =
peer.config.burnchain.pox_constants.reward_cycle_length; - let prepare_phase_len = peer.config.burnchain.pox_constants.prepare_length; + let reward_cycle_len = peer + .config + .chain_config + .burnchain + .pox_constants + .reward_cycle_length; + let prepare_phase_len = peer + .config + .chain_config + .burnchain + .pox_constants + .prepare_length; - let target_height = peer.config.burnchain.pox_constants.pox_4_activation_height; + let target_height = peer + .config + .chain_config + .burnchain + .pox_constants + .pox_4_activation_height; let mut latest_block = None; while peer.get_burn_block_height() < u64::from(target_height) { @@ -6681,10 +6779,10 @@ pub fn pox_4_scenario_test_setup<'a>( } let latest_block = latest_block.expect("Failed to get tip"); - let reward_cycle = get_current_reward_cycle(&peer, &peer.config.burnchain); + let reward_cycle = get_current_reward_cycle(&peer, &peer.config.chain_config.burnchain); let next_reward_cycle = reward_cycle.wrapping_add(1); let burn_block_height = peer.get_burn_block_height(); - let current_block_height = peer.config.current_block; + let current_block_height = peer.config.chain_config.current_block; let min_ustx = get_stacking_minimum(&mut peer, &latest_block); ( @@ -6745,8 +6843,8 @@ pub fn pox_4_scenario_test_setup_nakamoto<'a>( max_amount: None, }]; let mut peer_config = TestPeerConfig::default(); - peer_config.aggregate_public_key = Some(aggregate_public_key); - let mut pox_constants = peer_config.clone().burnchain.pox_constants; + peer_config.chain_config.aggregate_public_key = Some(aggregate_public_key); + let mut pox_constants = peer_config.chain_config.burnchain.pox_constants.clone(); pox_constants.reward_cycle_length = 10; pox_constants.v2_unlock_height = 21; pox_constants.pox_3_activation_height = 26; @@ -6762,12 +6860,12 @@ pub fn pox_4_scenario_test_setup_nakamoto<'a>( boot_plan.initial_balances = initial_balances; boot_plan.pox_constants = pox_constants.clone(); burnchain.pox_constants = pox_constants; - peer_config.burnchain = burnchain.clone(); - peer_config.test_signers = Some(test_signers.clone()); + peer_config.chain_config.burnchain = burnchain.clone(); + peer_config.chain_config.test_signers = Some(test_signers.clone()); info!("---- Booting into Nakamoto Peer ----"); let mut peer = boot_plan.boot_into_nakamoto_peer(vec![], Some(observer)); - let sort_db = peer.sortdb.as_ref().unwrap(); + let sort_db = peer.chain.sortdb.as_ref().unwrap(); let latest_block = sort_db .index_handle_at_tip() .get_nakamoto_tip_block_id() @@ -6775,7 +6873,7 @@ pub fn pox_4_scenario_test_setup_nakamoto<'a>( .unwrap(); let coinbase_nonce = 0; - let burn_block_height = get_tip(peer.sortdb.as_ref()).block_height; + let burn_block_height = get_tip(peer.chain.sortdb.as_ref()).block_height; let reward_cycle = burnchain .block_height_to_reward_cycle(burn_block_height) .unwrap() as u128; @@ -6954,9 +7052,16 @@ fn test_scenario_one(use_nakamoto: bool) { // Commit tx & advance to the reward set calculation height (2nd block of the prepare phase) let target_height = peer .config + .chain_config .burnchain .reward_cycle_to_block_height(next_reward_cycle as u64) - .saturating_sub(peer.config.burnchain.pox_constants.prepare_length as u64) + .saturating_sub( + peer.config + .chain_config + .burnchain + .pox_constants + .prepare_length as u64, + ) .wrapping_add(2); let (latest_block, tx_block, receipts) = advance_to_block_height( &mut peer, @@ -7043,6 +7148,7 @@ fn test_scenario_one(use_nakamoto: bool) { // 4.3 Check unlock height let unlock_height_expected = 
Value::UInt( peer.config + .chain_config .burnchain .reward_cycle_to_block_height(next_reward_cycle as u64 + lock_period as u64) .wrapping_sub(1) as u128, @@ -7092,6 +7198,7 @@ fn test_scenario_one(use_nakamoto: bool) { // 6.3 Check unlock height (end of cycle 7 - block 140) let unlock_height_expected = Value::UInt( peer.config + .chain_config .burnchain .reward_cycle_to_block_height((next_reward_cycle + lock_period) as u64) .wrapping_sub(1) as u128, @@ -7112,7 +7219,11 @@ fn test_scenario_one(use_nakamoto: bool) { &alice.private_key, alice.nonce, alice_index, - peer_config.aggregate_public_key.clone().unwrap(), + peer_config + .chain_config + .aggregate_public_key + .clone() + .unwrap(), 1, next_reward_cycle, ); @@ -7122,7 +7233,11 @@ fn test_scenario_one(use_nakamoto: bool) { &bob.private_key, bob.nonce, bob_index, - peer_config.aggregate_public_key.clone().unwrap(), + peer_config + .chain_config + .aggregate_public_key + .clone() + .unwrap(), 1, next_reward_cycle, ); @@ -7139,7 +7254,7 @@ fn test_scenario_one(use_nakamoto: bool) { &tester_key, 1, // only tx is a stack-stx tester_index, - peer_config.aggregate_public_key.unwrap(), + peer_config.chain_config.aggregate_public_key.unwrap(), 1, next_reward_cycle, ); @@ -7150,6 +7265,7 @@ fn test_scenario_one(use_nakamoto: bool) { // Commit vote txs & advance to the first burn block of reward cycle 8 (block 161) let mut target_height = peer .config + .chain_config .burnchain .reward_cycle_to_block_height(target_reward_cycle as u64); info!( @@ -7387,9 +7503,16 @@ fn test_deser_abort() { // Commit tx & advance to the reward set calculation height (2nd block of the prepare phase) let target_height = peer .config + .chain_config .burnchain .reward_cycle_to_block_height(next_reward_cycle as u64) - .saturating_sub(peer.config.burnchain.pox_constants.prepare_length as u64) + .saturating_sub( + peer.config + .chain_config + .burnchain + .pox_constants + .prepare_length as u64, + ) .wrapping_add(2); let (latest_block, tx_block, receipts) = advance_to_block_height( &mut peer, @@ -7476,6 +7599,7 @@ fn test_deser_abort() { // 4.3 Check unlock height let unlock_height_expected = Value::UInt( peer.config + .chain_config .burnchain .reward_cycle_to_block_height(next_reward_cycle as u64 + lock_period as u64) .wrapping_sub(1) as u128, @@ -7525,6 +7649,7 @@ fn test_deser_abort() { // 6.3 Check unlock height (end of cycle 7 - block 140) let unlock_height_expected = Value::UInt( peer.config + .chain_config .burnchain .reward_cycle_to_block_height((next_reward_cycle + lock_period) as u64) .wrapping_sub(1) as u128, @@ -7712,9 +7837,16 @@ fn test_scenario_two(use_nakamoto: bool) { // Commit tx & advance to the reward set calculation height (2nd block of the prepare phase for reward cycle 6) let target_height = peer .config + .chain_config .burnchain .reward_cycle_to_block_height(next_reward_cycle as u64) - .saturating_sub(peer_config.burnchain.pox_constants.prepare_length as u64) + .saturating_sub( + peer_config + .chain_config + .burnchain + .pox_constants + .prepare_length as u64, + ) .wrapping_add(2); let (latest_block, tx_block, receipts) = advance_to_block_height( &mut peer, @@ -7824,7 +7956,11 @@ fn test_scenario_two(use_nakamoto: bool) { &alice.private_key, alice.nonce, alice_index, - peer_config.aggregate_public_key.clone().unwrap(), + peer_config + .chain_config + .aggregate_public_key + .clone() + .unwrap(), 1, next_reward_cycle, ); @@ -7834,7 +7970,11 @@ fn test_scenario_two(use_nakamoto: bool) { &alice.private_key, alice.nonce, alice_index, - 
peer_config.aggregate_public_key.clone().unwrap(), + peer_config + .chain_config + .aggregate_public_key + .clone() + .unwrap(), 1, next_reward_cycle, ); @@ -7844,7 +7984,11 @@ fn test_scenario_two(use_nakamoto: bool) { &bob.private_key, bob.nonce, bob_index, - peer_config.aggregate_public_key.clone().unwrap(), + peer_config + .chain_config + .aggregate_public_key + .clone() + .unwrap(), 3, next_reward_cycle, ); @@ -7854,7 +7998,7 @@ fn test_scenario_two(use_nakamoto: bool) { &bob.private_key, bob.nonce, bob_index, - peer_config.aggregate_public_key.unwrap(), + peer_config.chain_config.aggregate_public_key.unwrap(), 1, next_reward_cycle, ); @@ -7870,6 +8014,7 @@ fn test_scenario_two(use_nakamoto: bool) { // Commit vote txs & advance to the first burn block of reward cycle 8 (block 161) let target_height = peer .config + .chain_config .burnchain .reward_cycle_to_block_height(target_reward_cycle as u64); let (latest_block, tx_block, receipts) = advance_to_block_height( @@ -8097,6 +8242,7 @@ fn test_scenario_three(use_nakamoto: bool) { david.principal.clone(), Some( peer.config + .chain_config .burnchain .reward_cycle_to_block_height(next_reward_cycle as u64) .into(), @@ -8213,9 +8359,16 @@ fn test_scenario_three(use_nakamoto: bool) { // Commit txs in next block & advance to reward set calculation of the next reward cycle let target_height = peer .config + .chain_config .burnchain .reward_cycle_to_block_height(next_reward_cycle as u64) - .saturating_sub(peer_config.burnchain.pox_constants.prepare_length as u64) + .saturating_sub( + peer_config + .chain_config + .burnchain + .pox_constants + .prepare_length as u64, + ) .wrapping_add(2); let (latest_block, tx_block, receipts) = advance_to_block_height( &mut peer, @@ -8527,9 +8680,16 @@ fn test_scenario_four(use_nakamoto: bool) { // Commit tx & advance to the reward set calculation height (2nd block of the prepare phase for reward cycle 6) let target_height = peer .config + .chain_config .burnchain .reward_cycle_to_block_height(next_reward_cycle as u64) - .saturating_sub(peer_config.burnchain.pox_constants.prepare_length as u64) + .saturating_sub( + peer_config + .chain_config + .burnchain + .pox_constants + .prepare_length as u64, + ) .wrapping_add(2); let (latest_block, tx_block, _receipts) = advance_to_block_height( &mut peer, @@ -8561,7 +8721,11 @@ fn test_scenario_four(use_nakamoto: bool) { &alice.private_key, alice.nonce, bob_index, - peer_config.aggregate_public_key.clone().unwrap(), + peer_config + .chain_config + .aggregate_public_key + .clone() + .unwrap(), 1, next_reward_cycle, ); @@ -8571,7 +8735,11 @@ fn test_scenario_four(use_nakamoto: bool) { &alice.private_key, alice.nonce, alice_index, - peer_config.aggregate_public_key.clone().unwrap(), + peer_config + .chain_config + .aggregate_public_key + .clone() + .unwrap(), 1, next_reward_cycle, ); @@ -8581,7 +8749,11 @@ fn test_scenario_four(use_nakamoto: bool) { &bob.private_key, bob.nonce, bob_index, - peer_config.aggregate_public_key.clone().unwrap(), + peer_config + .chain_config + .aggregate_public_key + .clone() + .unwrap(), 1, next_reward_cycle, ); @@ -8598,7 +8770,11 @@ fn test_scenario_four(use_nakamoto: bool) { &tester_key, 1, // only tx is a stack-stx tester_index, - peer_config.aggregate_public_key.clone().unwrap(), + peer_config + .chain_config + .aggregate_public_key + .clone() + .unwrap(), 1, next_reward_cycle, ); @@ -8608,9 +8784,16 @@ fn test_scenario_four(use_nakamoto: bool) { // Commit vote txs & move to the prepare phase of reward cycle 7 (block 155) let target_height 
= peer .config + .chain_config .burnchain .reward_cycle_to_block_height(next_reward_cycle as u64 + 1) - .saturating_sub(peer_config.burnchain.pox_constants.prepare_length as u64); + .saturating_sub( + peer_config + .chain_config + .burnchain + .pox_constants + .prepare_length as u64, + ); let (latest_block, tx_block, receipts) = advance_to_block_height( &mut peer, &observer, @@ -8654,7 +8837,11 @@ fn test_scenario_four(use_nakamoto: bool) { .expect("No approved key found"); assert_eq!( approved_key, - peer_config.aggregate_public_key.clone().unwrap() + peer_config + .chain_config + .aggregate_public_key + .clone() + .unwrap() ); // Alice stack-extend err tx @@ -8689,7 +8876,7 @@ fn test_scenario_four(use_nakamoto: bool) { &alice.private_key, alice.nonce, alice_index, - peer_config.aggregate_public_key.unwrap(), + peer_config.chain_config.aggregate_public_key.unwrap(), 1, 7, ); @@ -9089,7 +9276,7 @@ pub fn prepare_pox4_test<'a>( max_amount: None, }) .collect::>(); - let mut pox_constants = TestPeerConfig::default().burnchain.pox_constants; + let mut pox_constants = TestChainstateConfig::default().burnchain.pox_constants; pox_constants.reward_cycle_length = 10; pox_constants.v2_unlock_height = 21; pox_constants.pox_3_activation_height = 26; @@ -9113,7 +9300,7 @@ pub fn prepare_pox4_test<'a>( info!("---- Booting into Nakamoto Peer ----"); let peer = boot_plan.boot_into_nakamoto_peer(vec![], observer); - let sort_db = peer.sortdb.as_ref().unwrap(); + let sort_db = peer.chain.sortdb.as_ref().unwrap(); let latest_block = sort_db .index_handle_at_tip() .get_nakamoto_tip_block_id() @@ -9121,7 +9308,7 @@ pub fn prepare_pox4_test<'a>( .unwrap(); let coinbase_nonce = 0; - let block_height = get_tip(peer.sortdb.as_ref()).block_height; + let block_height = get_tip(peer.chain.sortdb.as_ref()).block_height; info!("Block height: {}", block_height); @@ -9139,16 +9326,16 @@ pub fn prepare_pox4_test<'a>( let target_height = burnchain.pox_constants.pox_4_activation_height; let mut coinbase_nonce = 0; let mut latest_block = peer.tenure_with_txs(&[], &mut coinbase_nonce); - while get_tip(peer.sortdb.as_ref()).block_height < u64::from(target_height) { + while get_tip(peer.chain.sortdb.as_ref()).block_height < u64::from(target_height) { latest_block = peer.tenure_with_txs(&[], &mut coinbase_nonce); // if we reach epoch 2.1, perform the check - if get_tip(peer.sortdb.as_ref()).block_height + if get_tip(peer.chain.sortdb.as_ref()).block_height > epochs[StacksEpochId::Epoch21].start_height { assert_latest_was_burn(&mut peer); } } - let block_height = get_tip(peer.sortdb.as_ref()).block_height; + let block_height = get_tip(peer.chain.sortdb.as_ref()).block_height; ( burnchain, peer, @@ -9178,9 +9365,10 @@ pub fn tenure_with_txs_fallible( tenure_change.burn_view_consensus_hash = consensus_hash.clone(); let tenure_change_tx = peer + .chain .miner .make_nakamoto_tenure_change(tenure_change.clone()); - let coinbase_tx = peer.miner.make_nakamoto_coinbase(None, vrf_proof); + let coinbase_tx = peer.chain.miner.make_nakamoto_coinbase(None, vrf_proof); let blocks_and_sizes = peer.make_nakamoto_tenure_and( tenure_change_tx, @@ -9202,8 +9390,8 @@ pub fn tenure_with_txs_fallible( .map(|(block, _, _)| block) .collect(); - let chainstate = &mut peer.stacks_node.as_mut().unwrap().chainstate; - let sort_db = peer.sortdb.as_mut().unwrap(); + let chainstate = &mut peer.chain.stacks_node.as_mut().unwrap().chainstate; + let sort_db = peer.chain.sortdb.as_mut().unwrap(); let latest_block = sort_db .index_handle_at_tip() 
.get_nakamoto_tip_block_id() @@ -9230,8 +9418,8 @@ pub fn tenure_with_txs( tenure_change.tenure_consensus_hash = consensus_hash.clone(); tenure_change.burn_view_consensus_hash = consensus_hash.clone(); - let tenure_change_tx = peer.miner.make_nakamoto_tenure_change(tenure_change); - let coinbase_tx = peer.miner.make_nakamoto_coinbase(None, vrf_proof); + let tenure_change_tx = peer.chain.miner.make_nakamoto_tenure_change(tenure_change); + let coinbase_tx = peer.chain.miner.make_nakamoto_coinbase(None, vrf_proof); let blocks_and_sizes = peer.make_nakamoto_tenure( tenure_change_tx, @@ -9251,8 +9439,8 @@ pub fn tenure_with_txs( .map(|(block, _, _)| block) .collect(); - let chainstate = &mut peer.stacks_node.as_mut().unwrap().chainstate; - let sort_db = peer.sortdb.as_mut().unwrap(); + let chainstate = &mut peer.chain.stacks_node.as_mut().unwrap().chainstate; + let sort_db = peer.chain.sortdb.as_mut().unwrap(); let latest_block = sort_db .index_handle_at_tip() .get_nakamoto_tip_block_id() @@ -9328,13 +9516,14 @@ fn missed_slots_no_unlock() { + 1; // produce blocks until epoch 2.5 - while get_tip(peer.sortdb.as_ref()).block_height <= epochs[StacksEpochId::Epoch25].start_height + while get_tip(peer.chain.sortdb.as_ref()).block_height + <= epochs[StacksEpochId::Epoch25].start_height { peer.tenure_with_txs(&[], &mut coinbase_nonce); } // perform lockups so we can test that pox-4 does not exhibit unlock-on-miss behavior - let tip = get_tip(peer.sortdb.as_ref()); + let tip = get_tip(peer.chain.sortdb.as_ref()); let alice_lockup = make_simple_pox_4_lock(&alice, &mut peer, 1024 * POX_THRESHOLD_STEPS_USTX, 6); @@ -9377,7 +9566,7 @@ fn missed_slots_no_unlock() { ); assert_eq!(bob_bal.amount_locked(), POX_THRESHOLD_STEPS_USTX); - while get_tip(peer.sortdb.as_ref()).block_height < height_target { + while get_tip(peer.chain.sortdb.as_ref()).block_height < height_target { latest_block = peer.tenure_with_txs(&[], &mut coinbase_nonce); } @@ -9578,12 +9767,13 @@ fn no_lockups_2_5() { + 1; // produce blocks until epoch 2.5 - while get_tip(peer.sortdb.as_ref()).block_height <= epochs[StacksEpochId::Epoch25].start_height + while get_tip(peer.chain.sortdb.as_ref()).block_height + <= epochs[StacksEpochId::Epoch25].start_height { peer.tenure_with_txs(&[], &mut coinbase_nonce); } - let tip = get_tip(peer.sortdb.as_ref()); + let tip = get_tip(peer.chain.sortdb.as_ref()); let bob_lockup = make_simple_pox_4_lock(&bob, &mut peer, 1 * POX_THRESHOLD_STEPS_USTX, 6); @@ -9618,7 +9808,7 @@ fn no_lockups_2_5() { ); assert_eq!(bob_bal.amount_locked(), POX_THRESHOLD_STEPS_USTX); - while get_tip(peer.sortdb.as_ref()).block_height < height_target { + while get_tip(peer.chain.sortdb.as_ref()).block_height < height_target { latest_block = peer.tenure_with_txs(&[], &mut coinbase_nonce); } @@ -9722,36 +9912,43 @@ fn test_scenario_five(use_nakamoto: bool) { let carl_end_burn_height = peer .config + .chain_config .burnchain .reward_cycle_to_block_height(next_reward_cycle.wrapping_add(carl_lock_period) as u64) as u128; let frank_end_burn_height = peer .config + .chain_config .burnchain .reward_cycle_to_block_height(next_reward_cycle.wrapping_add(frank_lock_period) as u64) as u128; let grace_end_burn_height = peer .config + .chain_config .burnchain .reward_cycle_to_block_height(next_reward_cycle.wrapping_add(grace_lock_period) as u64) as u128; let heidi_end_burn_height = peer .config + .chain_config .burnchain .reward_cycle_to_block_height(next_reward_cycle.wrapping_add(heidi_lock_period) as u64) as u128; let ivan_end_burn_height = peer 
.config + .chain_config .burnchain .reward_cycle_to_block_height(next_reward_cycle.wrapping_add(ivan_lock_period) as u64) as u128; let jude_end_burn_height = peer .config + .chain_config .burnchain .reward_cycle_to_block_height(next_reward_cycle.wrapping_add(jude_lock_period) as u64) as u128; let mallory_end_burn_height = peer .config + .chain_config .burnchain .reward_cycle_to_block_height(next_reward_cycle.wrapping_add(mallory_lock_period) as u64) as u128; @@ -9964,15 +10161,22 @@ fn test_scenario_five(use_nakamoto: bool) { // Advance to reward set calculation of the next reward cycle let target_height = peer .config + .chain_config .burnchain .reward_cycle_to_block_height(next_reward_cycle as u64) - .saturating_sub(peer_config.burnchain.pox_constants.prepare_length as u64) + .saturating_sub( + peer_config + .chain_config + .burnchain + .pox_constants + .prepare_length as u64, + ) .wrapping_add(2); info!( "Scenario five: submitting stacking txs."; "target_height" => target_height, "next_reward_cycle" => next_reward_cycle, - "prepare_length" => peer_config.burnchain.pox_constants.prepare_length, + "prepare_length" => peer_config.chain_config.burnchain.pox_constants.prepare_length, ); let (latest_block, tx_block, _receipts) = advance_to_block_height( &mut peer, @@ -10031,7 +10235,11 @@ fn test_scenario_five(use_nakamoto: bool) { &alice.private_key, alice.nonce, alice_index, - peer_config.aggregate_public_key.clone().unwrap(), + peer_config + .chain_config + .aggregate_public_key + .clone() + .unwrap(), 1, next_reward_cycle, ); @@ -10039,7 +10247,11 @@ fn test_scenario_five(use_nakamoto: bool) { &bob.private_key, bob.nonce, bob_index, - peer_config.aggregate_public_key.clone().unwrap(), + peer_config + .chain_config + .aggregate_public_key + .clone() + .unwrap(), 1, next_reward_cycle, ); @@ -10047,7 +10259,11 @@ fn test_scenario_five(use_nakamoto: bool) { &carl.private_key, carl.nonce, carl_index, - peer_config.aggregate_public_key.clone().unwrap(), + peer_config + .chain_config + .aggregate_public_key + .clone() + .unwrap(), 1, next_reward_cycle, ); @@ -10058,6 +10274,7 @@ fn test_scenario_five(use_nakamoto: bool) { // Mine vote txs & advance to the reward set calculation of the next reward cycle let target_height = peer .config + .chain_config .burnchain .reward_cycle_to_block_height(next_reward_cycle as u64); info!( @@ -10088,7 +10305,10 @@ fn test_scenario_five(use_nakamoto: bool) { } let approved_key = get_approved_aggregate_key(&mut peer, &latest_block, next_reward_cycle) .expect("No approved key found"); - assert_eq!(approved_key, peer_config.aggregate_public_key.unwrap()); + assert_eq!( + approved_key, + peer_config.chain_config.aggregate_public_key.unwrap() + ); // Stack for following reward cycle again and then advance to epoch 3.0 activation boundary let reward_cycle = peer.get_reward_cycle() as u128; @@ -10167,9 +10387,16 @@ fn test_scenario_five(use_nakamoto: bool) { let target_height = peer .config + .chain_config .burnchain .reward_cycle_to_block_height(next_reward_cycle as u64) - .saturating_sub(peer_config.burnchain.pox_constants.prepare_length as u64) + .saturating_sub( + peer_config + .chain_config + .burnchain + .pox_constants + .prepare_length as u64, + ) .wrapping_add(2); info!( "Scenario five: submitting extend and aggregate commit txs. 
Target height: {}", @@ -10225,8 +10452,9 @@ fn test_scenario_five(use_nakamoto: bool) { let cycle_id = next_reward_cycle; // Generate next cycle aggregate public key - peer_config.aggregate_public_key = Some( + peer_config.chain_config.aggregate_public_key = Some( peer_config + .chain_config .test_signers .unwrap() .generate_aggregate_key(cycle_id as u64), @@ -10239,7 +10467,11 @@ fn test_scenario_five(use_nakamoto: bool) { &alice.private_key, alice.nonce, alice_index, - peer_config.aggregate_public_key.clone().unwrap(), + peer_config + .chain_config + .aggregate_public_key + .clone() + .unwrap(), 1, next_reward_cycle, ); @@ -10247,7 +10479,11 @@ fn test_scenario_five(use_nakamoto: bool) { &bob.private_key, bob.nonce, bob_index, - peer_config.aggregate_public_key.clone().unwrap(), + peer_config + .chain_config + .aggregate_public_key + .clone() + .unwrap(), 1, next_reward_cycle, ); @@ -10255,7 +10491,11 @@ fn test_scenario_five(use_nakamoto: bool) { &carl.private_key, carl.nonce, carl_index, - peer_config.aggregate_public_key.clone().unwrap(), + peer_config + .chain_config + .aggregate_public_key + .clone() + .unwrap(), 1, next_reward_cycle, ); @@ -10266,6 +10506,7 @@ fn test_scenario_five(use_nakamoto: bool) { let target_height = peer .config + .chain_config .burnchain .reward_cycle_to_block_height(next_reward_cycle as u64); // Submit vote transactions @@ -10297,7 +10538,10 @@ fn test_scenario_five(use_nakamoto: bool) { } let approved_key = get_approved_aggregate_key(&mut peer, &latest_block, next_reward_cycle) .expect("No approved key found"); - assert_eq!(approved_key, peer_config.aggregate_public_key.unwrap()); + assert_eq!( + approved_key, + peer_config.chain_config.aggregate_public_key.unwrap() + ); // Let us start stacking for the following reward cycle let current_reward_cycle = peer.get_reward_cycle() as u128; @@ -10378,9 +10622,16 @@ fn test_scenario_five(use_nakamoto: bool) { let target_height = peer .config + .chain_config .burnchain .reward_cycle_to_block_height(next_reward_cycle as u64) - .saturating_sub(peer_config.burnchain.pox_constants.prepare_length as u64) + .saturating_sub( + peer_config + .chain_config + .burnchain + .pox_constants + .prepare_length as u64, + ) .wrapping_add(2); // This assertion just makes testing logic a bit easier let davids_stackers = &[(grace, grace_lock_period), (heidi, heidi_lock_period)]; diff --git a/stackslib/src/chainstate/stacks/boot/signers_tests.rs b/stackslib/src/chainstate/stacks/boot/signers_tests.rs index 2418e75d5c1..7cb414c7aa5 100644 --- a/stackslib/src/chainstate/stacks/boot/signers_tests.rs +++ b/stackslib/src/chainstate/stacks/boot/signers_tests.rs @@ -350,8 +350,8 @@ pub fn prepare_signers_test<'a>( tenure_change.tenure_consensus_hash = consensus_hash.clone(); tenure_change.burn_view_consensus_hash = consensus_hash.clone(); - let tenure_change_tx = peer.miner.make_nakamoto_tenure_change(tenure_change); - let coinbase_tx = peer.miner.make_nakamoto_coinbase(None, vrf_proof); + let tenure_change_tx = peer.chain.miner.make_nakamoto_tenure_change(tenure_change); + let coinbase_tx = peer.chain.miner.make_nakamoto_coinbase(None, vrf_proof); let blocks_and_sizes = peer.make_nakamoto_tenure( tenure_change_tx, @@ -409,8 +409,8 @@ fn advance_blocks( tenure_change.tenure_consensus_hash = consensus_hash.clone(); tenure_change.burn_view_consensus_hash = consensus_hash.clone(); - let tenure_change_tx = peer.miner.make_nakamoto_tenure_change(tenure_change); - let coinbase_tx = peer.miner.make_nakamoto_coinbase(None, vrf_proof); + let 
tenure_change_tx = peer.chain.miner.make_nakamoto_tenure_change(tenure_change); + let coinbase_tx = peer.chain.miner.make_nakamoto_coinbase(None, vrf_proof); let recipient_addr = boot_code_addr(false); let blocks_and_sizes = peer.make_nakamoto_tenure( tenure_change_tx, diff --git a/stackslib/src/chainstate/stacks/db/blocks.rs b/stackslib/src/chainstate/stacks/db/blocks.rs index 146b3061947..df7ea04c68e 100644 --- a/stackslib/src/chainstate/stacks/db/blocks.rs +++ b/stackslib/src/chainstate/stacks/db/blocks.rs @@ -413,7 +413,7 @@ impl StacksChainState { block_path.push(to_hex(&block_hash_bytes[0..2])); block_path.push(to_hex(&block_hash_bytes[2..4])); - block_path.push(format!("{}", index_block_hash)); + block_path.push(index_block_hash.to_string()); block_path } @@ -9746,7 +9746,7 @@ pub mod test { assert_eq!(mblock_info.len(), mblocks.len()); let this_mblock_info = &mblock_info[i]; - test_debug!("Pass {} (seq {})", &i, &this_mblock_info.sequence); + test_debug!("Pass {i} (seq {})", &this_mblock_info.sequence); assert_eq!(this_mblock_info.consensus_hash, consensus_hash); assert_eq!(this_mblock_info.anchored_block_hash, block.block_hash()); @@ -9799,7 +9799,7 @@ pub mod test { let mut parent_consensus_hashes = vec![]; for i in 0..32 { - test_debug!("Making block {}", i); + test_debug!("Making block {i}"); let privk = StacksPrivateKey::random(); let block = make_empty_coinbase_block(&privk); @@ -9814,7 +9814,7 @@ pub mod test { } for i in 0..blocks.len() { - test_debug!("Making microblock stream {}", i); + test_debug!("Making microblock stream {i}"); // make a sample microblock stream for block i let mut mblocks = make_sample_microblock_stream(&privks[i], &blocks[i].block_hash()); mblocks.truncate(3); @@ -9852,7 +9852,7 @@ pub mod test { .zip(µblocks) .enumerate() { - test_debug!("Store microblock stream {} to staging", i); + test_debug!("Store microblock stream {i} to staging"); for mblock in mblocks.iter() { test_debug!("Store microblock {}", &mblock.block_hash()); store_staging_microblock( @@ -10025,27 +10025,29 @@ pub mod test { .unwrap(); let initial_balance = 1000000000; - peer_config.initial_balances = vec![(addr.to_account_principal(), initial_balance)]; + peer_config.chain_config.initial_balances = + vec![(addr.to_account_principal(), initial_balance)]; let recv_addr = StacksAddress::from_string("ST1H1B54MY50RMBRRKS7GV2ZWG79RZ1RQ1ETW4E01").unwrap(); let mut peer = TestPeer::new(peer_config.clone()); - let chainstate_path = peer.chainstate_path.clone(); + let chainstate_path = peer.chain.chainstate_path.clone(); // NOTE: first_stacks_block_height is the burnchain height at which the node starts mining. // The burnchain block at this height will have the VRF key register, but no block-commit. // The first burnchain block with a Stacks block is at first_stacks_block_height + 1. 
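// Illustrative sketch of the pattern the hunks in these test files apply: chain-level
// settings of `TestPeerConfig` (burnchain, epochs, initial_balances, aggregate_public_key)
// are now reached through a nested `chain_config` (a `TestChainstateConfig`), and
// `TestPeer` runtime state (sortdb, stacks_node, miner, chainstate_path) through a nested
// `chain` field. The toy types below are placeholders, not the real stackslib definitions;
// only the access-path change is the point.
struct ChainConfig {
    reward_cycle_length: u64,
}

struct PeerConfig {
    chain_config: ChainConfig,
}

fn reward_cycle_to_block_height(cfg: &PeerConfig, cycle: u64) -> u64 {
    // Old call sites read the field directly from the config; after the refactor the same
    // value is reached through `chain_config`, which is all these hunks change.
    cfg.chain_config.reward_cycle_length * cycle
}

fn main() {
    let cfg = PeerConfig {
        chain_config: ChainConfig { reward_cycle_length: 10 },
    };
    assert_eq!(reward_cycle_to_block_height(&cfg, 6), 60);
}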
let (first_stacks_block_height, canonical_sort_id) = { - let sn = - SortitionDB::get_canonical_burn_chain_tip(peer.sortdb.as_ref().unwrap().conn()) - .unwrap(); + let sn = SortitionDB::get_canonical_burn_chain_tip( + peer.chain.sortdb.as_ref().unwrap().conn(), + ) + .unwrap(); (sn.block_height, sn.sortition_id) }; let mut header_hashes = vec![]; for i in 0..(first_stacks_block_height + 1) { - let ic = peer.sortdb.as_ref().unwrap().index_conn(); + let ic = peer.chain.sortdb.as_ref().unwrap().index_conn(); let sn = SortitionDB::get_ancestor_snapshot(&ic, i, &canonical_sort_id) .unwrap() .unwrap(); @@ -10060,16 +10062,22 @@ pub mod test { } let last_stacks_block_height = first_stacks_block_height - + ((peer_config.burnchain.pox_constants.reward_cycle_length as u64) * 5) + + ((peer_config + .chain_config + .burnchain + .pox_constants + .reward_cycle_length as u64) + * 5) + 2; let mut mblock_nonce = 0; // make some blocks, up to and including a fractional reward cycle for tenure_id in 0..(last_stacks_block_height - first_stacks_block_height) { - let tip = - SortitionDB::get_canonical_burn_chain_tip(peer.sortdb.as_ref().unwrap().conn()) - .unwrap(); + let tip = SortitionDB::get_canonical_burn_chain_tip( + peer.chain.sortdb.as_ref().unwrap().conn(), + ) + .unwrap(); assert_eq!(tip.block_height, first_stacks_block_height + tenure_id); @@ -10122,7 +10130,7 @@ pub mod test { &coinbase_tx, BlockBuilderSettings::max_value(), None, - &peer_config.burnchain, + &peer_config.chain_config.burnchain, ) .unwrap(); @@ -10181,6 +10189,7 @@ pub mod test { } let total_reward_cycles = peer_config + .chain_config .burnchain .block_height_to_reward_cycle(last_stacks_block_height) .unwrap(); @@ -10197,14 +10206,20 @@ pub mod test { // everything is stored, so check each reward cycle for i in 0..total_reward_cycles { - let start_range = peer_config.burnchain.reward_cycle_to_block_height(i); + let start_range = peer_config + .chain_config + .burnchain + .reward_cycle_to_block_height(i); let end_range = cmp::min( header_hashes.len() as u64, - peer_config.burnchain.reward_cycle_to_block_height(i + 1), + peer_config + .chain_config + .burnchain + .reward_cycle_to_block_height(i + 1), ); let blocks_inv = chainstate .get_blocks_inventory_for_reward_cycle( - &peer_config.burnchain, + &peer_config.chain_config.burnchain, i, &header_hashes[(start_range as usize)..(end_range as usize)], ) @@ -10248,10 +10263,16 @@ pub mod test { // orphan blocks for i in 0..total_reward_cycles { - let start_range = peer_config.burnchain.reward_cycle_to_block_height(i); + let start_range = peer_config + .chain_config + .burnchain + .reward_cycle_to_block_height(i); let end_range = cmp::min( header_hashes.len() as u64, - peer_config.burnchain.reward_cycle_to_block_height(i + 1), + peer_config + .chain_config + .burnchain + .reward_cycle_to_block_height(i + 1), ); for block_height in start_range..end_range { if let Some(hdr_hash) = &header_hashes[block_height as usize].1 { @@ -10272,14 +10293,20 @@ pub mod test { } for i in 0..total_reward_cycles { - let start_range = peer_config.burnchain.reward_cycle_to_block_height(i); + let start_range = peer_config + .chain_config + .burnchain + .reward_cycle_to_block_height(i); let end_range = cmp::min( header_hashes.len() as u64, - peer_config.burnchain.reward_cycle_to_block_height(i + 1), + peer_config + .chain_config + .burnchain + .reward_cycle_to_block_height(i + 1), ); let blocks_inv = chainstate .get_blocks_inventory_for_reward_cycle( - &peer_config.burnchain, + &peer_config.chain_config.burnchain, 
i, &header_hashes[(start_range as usize)..(end_range as usize)], ) @@ -10302,25 +10329,27 @@ pub mod test { #[test] fn test_get_parent_block_header() { let peer_config = TestPeerConfig::new(function_name!(), 21313, 21314); - let burnchain = peer_config.burnchain.clone(); + let burnchain = peer_config.chain_config.burnchain.clone(); let mut peer = TestPeer::new(peer_config); - let chainstate_path = peer.chainstate_path.clone(); + let chainstate_path = peer.chain.chainstate_path.clone(); let num_blocks = 10; let first_stacks_block_height = { - let sn = - SortitionDB::get_canonical_burn_chain_tip(peer.sortdb.as_ref().unwrap().conn()) - .unwrap(); + let sn = SortitionDB::get_canonical_burn_chain_tip( + peer.chain.sortdb.as_ref().unwrap().conn(), + ) + .unwrap(); sn.block_height }; let mut last_block_ch: Option = None; let mut last_parent_opt: Option = None; for tenure_id in 0..num_blocks { - let tip = - SortitionDB::get_canonical_burn_chain_tip(peer.sortdb.as_ref().unwrap().conn()) - .unwrap(); + let tip = SortitionDB::get_canonical_burn_chain_tip( + peer.chain.sortdb.as_ref().unwrap().conn(), + ) + .unwrap(); assert_eq!( tip.block_height, @@ -10387,7 +10416,7 @@ pub mod test { if tenure_id == 0 { let parent_header_opt = StacksChainState::load_parent_block_header( - &peer.sortdb.as_ref().unwrap().index_conn(), + &peer.chain.sortdb.as_ref().unwrap().index_conn(), &blocks_path, &consensus_hash, &stacks_block.block_hash(), @@ -10395,7 +10424,7 @@ pub mod test { assert!(parent_header_opt.is_err()); } else { let parent_header_opt = StacksChainState::load_parent_block_header( - &peer.sortdb.as_ref().unwrap().index_conn(), + &peer.chain.sortdb.as_ref().unwrap().index_conn(), &blocks_path, &consensus_hash, &stacks_block.block_hash(), @@ -10844,31 +10873,37 @@ pub mod test { .map(|addr| (addr.to_account_principal(), initial_balance)) .collect(); init_balances.push((addr.to_account_principal(), initial_balance)); - peer_config.initial_balances = init_balances; + peer_config.chain_config.initial_balances = init_balances; let mut epochs = StacksEpoch::unit_test_2_1(0); let last_epoch = epochs.last_mut().unwrap(); last_epoch.block_limit.runtime = 10_000_000; - peer_config.epochs = Some(epochs); - peer_config.burnchain.pox_constants.v1_unlock_height = 26; - let burnchain = peer_config.burnchain.clone(); + peer_config.chain_config.epochs = Some(epochs); + peer_config + .chain_config + .burnchain + .pox_constants + .v1_unlock_height = 26; + let burnchain = peer_config.chain_config.burnchain.clone(); let mut peer = TestPeer::new(peer_config); - let chainstate_path = peer.chainstate_path.clone(); + let chainstate_path = peer.chain.chainstate_path.clone(); let first_stacks_block_height = { - let sn = - SortitionDB::get_canonical_burn_chain_tip(peer.sortdb.as_ref().unwrap().conn()) - .unwrap(); + let sn = SortitionDB::get_canonical_burn_chain_tip( + peer.chain.sortdb.as_ref().unwrap().conn(), + ) + .unwrap(); sn.block_height }; let mut last_block_id = StacksBlockId([0x00; 32]); for tenure_id in 0..num_blocks { let del_addr = &del_addrs[tenure_id]; - let tip = - SortitionDB::get_canonical_burn_chain_tip(peer.sortdb.as_ref().unwrap().conn()) - .unwrap(); + let tip = SortitionDB::get_canonical_burn_chain_tip( + peer.chain.sortdb.as_ref().unwrap().conn(), + ) + .unwrap(); assert_eq!( tip.block_height, @@ -11024,11 +11059,12 @@ pub mod test { ); } - let tip = - SortitionDB::get_canonical_burn_chain_tip(peer.sortdb.as_ref().unwrap().conn()) - .unwrap(); + let tip = SortitionDB::get_canonical_burn_chain_tip( + 
peer.chain.sortdb.as_ref().unwrap().conn(), + ) + .unwrap(); - let sortdb = peer.sortdb.take().unwrap(); + let sortdb = peer.chain.sortdb.take().unwrap(); { let chainstate = peer.chainstate(); let (mut chainstate_tx, clarity_instance) = @@ -11061,12 +11097,12 @@ pub mod test { assert_eq!(transfer_stx_ops, expected_transfer_ops); assert_eq!(delegate_stx_ops, expected_del_ops); } - peer.sortdb.replace(sortdb); + peer.chain.sortdb.replace(sortdb); } // all burnchain transactions mined, even if there was no sortition in the burn block in // which they were mined. - let sortdb = peer.sortdb.take().unwrap(); + let sortdb = peer.chain.sortdb.take().unwrap(); // definitely missing some blocks -- there are empty sortitions let stacks_tip = @@ -11085,7 +11121,7 @@ pub mod test { StacksChainState::get_account(conn, &addr.to_account_principal()) }) .unwrap(); - peer.sortdb.replace(sortdb); + peer.chain.sortdb.replace(sortdb); assert_eq!( account.stx_balance.get_total_balance().unwrap(), @@ -11166,32 +11202,38 @@ pub mod test { .map(|addr| (addr.to_account_principal(), initial_balance)) .collect(); init_balances.push((addr.to_account_principal(), initial_balance)); - peer_config.initial_balances = init_balances; + peer_config.chain_config.initial_balances = init_balances; let mut epochs = StacksEpoch::unit_test_2_1(0); let last_epoch = epochs.last_mut().unwrap(); last_epoch.block_limit.runtime = 10_000_000; last_epoch.block_limit.read_length = 10_000_000; - peer_config.epochs = Some(epochs); - peer_config.burnchain.pox_constants.v1_unlock_height = 26; - let burnchain = peer_config.burnchain.clone(); + peer_config.chain_config.epochs = Some(epochs); + peer_config + .chain_config + .burnchain + .pox_constants + .v1_unlock_height = 26; + let burnchain = peer_config.chain_config.burnchain.clone(); let mut peer = TestPeer::new(peer_config); - let chainstate_path = peer.chainstate_path.clone(); + let chainstate_path = peer.chain.chainstate_path.clone(); let first_stacks_block_height = { - let sn = - SortitionDB::get_canonical_burn_chain_tip(peer.sortdb.as_ref().unwrap().conn()) - .unwrap(); + let sn = SortitionDB::get_canonical_burn_chain_tip( + peer.chain.sortdb.as_ref().unwrap().conn(), + ) + .unwrap(); sn.block_height }; let mut last_block_id = StacksBlockId([0x00; 32]); for tenure_id in 0..num_blocks { let del_addr = &del_addrs[tenure_id]; - let tip = - SortitionDB::get_canonical_burn_chain_tip(peer.sortdb.as_ref().unwrap().conn()) - .unwrap(); + let tip = SortitionDB::get_canonical_burn_chain_tip( + peer.chain.sortdb.as_ref().unwrap().conn(), + ) + .unwrap(); assert_eq!( tip.block_height, @@ -11704,11 +11746,12 @@ pub mod test { ); } - let tip = - SortitionDB::get_canonical_burn_chain_tip(peer.sortdb.as_ref().unwrap().conn()) - .unwrap(); + let tip = SortitionDB::get_canonical_burn_chain_tip( + peer.chain.sortdb.as_ref().unwrap().conn(), + ) + .unwrap(); - let sortdb = peer.sortdb.take().unwrap(); + let sortdb = peer.chain.sortdb.take().unwrap(); { let chainstate = peer.chainstate(); let (mut chainstate_tx, clarity_instance) = @@ -11741,12 +11784,12 @@ pub mod test { assert_eq!(transfer_stx_ops, expected_transfer_ops); assert_eq!(delegate_stx_ops, expected_delegate_ops); } - peer.sortdb.replace(sortdb); + peer.chain.sortdb.replace(sortdb); } // all burnchain transactions mined, even if there was no sortition in the burn block in // which they were mined. 
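// Illustrative sketch of the take()/replace() dance these tests use on the sortition DB,
// which this diff only re-paths from `peer.sortdb` to `peer.chain.sortdb`: the Option is
// moved out so the peer and the DB can be borrowed independently, then put back. Toy
// placeholder types below, not the real SortitionDB or TestPeer.
struct Chain {
    sortdb: Option<String>,
}

struct Peer {
    chain: Chain,
}

fn use_sortdb_then_restore(peer: &mut Peer) {
    // Move the DB out of the peer, mirroring `peer.chain.sortdb.take().unwrap()`.
    let sortdb = peer.chain.sortdb.take().unwrap();
    // ...the tests call methods that borrow `peer` mutably while holding `sortdb` here...
    // Put it back, mirroring `peer.chain.sortdb.replace(sortdb)`.
    peer.chain.sortdb.replace(sortdb);
}

fn main() {
    let mut peer = Peer {
        chain: Chain { sortdb: Some("sortdb".to_string()) },
    };
    use_sortdb_then_restore(&mut peer);
    assert!(peer.chain.sortdb.is_some());
}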
- let sortdb = peer.sortdb.take().unwrap(); + let sortdb = peer.chain.sortdb.take().unwrap(); // definitely missing some blocks -- there are empty sortitions let stacks_tip = @@ -11768,7 +11811,7 @@ pub mod test { StacksChainState::get_account(conn, &addr.to_account_principal()) }) .unwrap(); - peer.sortdb.replace(sortdb); + peer.chain.sortdb.replace(sortdb); // skipped tenure 6's TransferSTX assert_eq!( diff --git a/stackslib/src/chainstate/stacks/db/unconfirmed.rs b/stackslib/src/chainstate/stacks/db/unconfirmed.rs index 3cef8148d67..08eb119f3f9 100644 --- a/stackslib/src/chainstate/stacks/db/unconfirmed.rs +++ b/stackslib/src/chainstate/stacks/db/unconfirmed.rs @@ -652,18 +652,20 @@ mod test { let initial_balance = 1000000000; let mut peer_config = TestPeerConfig::new(function_name!(), 7000, 7001); - peer_config.initial_balances = vec![(addr.to_account_principal(), initial_balance)]; - let burnchain = peer_config.burnchain.clone(); + peer_config.chain_config.initial_balances = + vec![(addr.to_account_principal(), initial_balance)]; + let burnchain = peer_config.chain_config.burnchain.clone(); let mut peer = TestPeer::new(peer_config); - let chainstate_path = peer.chainstate_path.clone(); + let chainstate_path = peer.chain.chainstate_path.clone(); let num_blocks = 10; let first_stacks_block_height = { - let sn = - SortitionDB::get_canonical_burn_chain_tip(peer.sortdb.as_ref().unwrap().conn()) - .unwrap(); + let sn = SortitionDB::get_canonical_burn_chain_tip( + peer.chain.sortdb.as_ref().unwrap().conn(), + ) + .unwrap(); sn.block_height }; @@ -674,9 +676,10 @@ mod test { Hash160::from_node_public_key(&StacksPublicKey::from_private(µblock_privkey)); // send transactions to the mempool - let tip = - SortitionDB::get_canonical_burn_chain_tip(peer.sortdb.as_ref().unwrap().conn()) - .unwrap(); + let tip = SortitionDB::get_canonical_burn_chain_tip( + peer.chain.sortdb.as_ref().unwrap().conn(), + ) + .unwrap(); assert_eq!( tip.block_height, @@ -754,7 +757,7 @@ mod test { // build 1-block microblock stream let microblocks = { - let sortdb = peer.sortdb.take().unwrap(); + let sortdb = peer.chain.sortdb.take().unwrap(); let sort_iconn = sortdb .index_handle_at_block(peer.chainstate(), &canonical_tip) .unwrap(); @@ -813,7 +816,7 @@ mod test { microblock }; - peer.sortdb = Some(sortdb); + peer.chain.sortdb = Some(sortdb); vec![microblock] }; @@ -829,7 +832,7 @@ mod test { } // process microblock stream to generate unconfirmed state - let sortdb = peer.sortdb.take().unwrap(); + let sortdb = peer.chain.sortdb.take().unwrap(); let iconn = sortdb .index_handle_at_block(peer.chainstate(), &canonical_tip) .unwrap(); @@ -848,14 +851,14 @@ mod test { }) .unwrap() .unwrap(); - peer.sortdb = Some(sortdb); + peer.chain.sortdb = Some(sortdb); // move 1 stx per round assert_eq!(recv_balance.amount_unlocked(), (tenure_id + 1) as u128); let (canonical_burn, canonical_block) = SortitionDB::get_canonical_stacks_chain_tip_hash(peer.sortdb().conn()).unwrap(); - let sortdb = peer.sortdb.take().unwrap(); + let sortdb = peer.chain.sortdb.take().unwrap(); let iconn = sortdb .index_handle_at_block(peer.chainstate(), &canonical_tip) .unwrap(); @@ -869,7 +872,7 @@ mod test { }) }) .unwrap(); - peer.sortdb = Some(sortdb); + peer.chain.sortdb = Some(sortdb); assert_eq!(confirmed_recv_balance.amount_unlocked(), tenure_id as u128); eprintln!("\nrecv_balance: {}\nconfirmed_recv_balance: {}\nblock header {}: {:?}\ntip: {}/{}\n", recv_balance.amount_unlocked(), confirmed_recv_balance.amount_unlocked(), &stacks_block.block_hash(), 
&stacks_block.header, &canonical_burn, &canonical_block); @@ -889,18 +892,20 @@ mod test { let initial_balance = 1000000000; let mut peer_config = TestPeerConfig::new(function_name!(), 7002, 7003); - peer_config.initial_balances = vec![(addr.to_account_principal(), initial_balance)]; - let burnchain = peer_config.burnchain.clone(); + peer_config.chain_config.initial_balances = + vec![(addr.to_account_principal(), initial_balance)]; + let burnchain = peer_config.chain_config.burnchain.clone(); let mut peer = TestPeer::new(peer_config); - let chainstate_path = peer.chainstate_path.clone(); + let chainstate_path = peer.chain.chainstate_path.clone(); let num_blocks = 10; let first_stacks_block_height = { - let tip = - SortitionDB::get_canonical_burn_chain_tip(peer.sortdb.as_ref().unwrap().conn()) - .unwrap(); + let tip = SortitionDB::get_canonical_burn_chain_tip( + peer.chain.sortdb.as_ref().unwrap().conn(), + ) + .unwrap(); tip.block_height }; @@ -911,9 +916,10 @@ mod test { Hash160::from_node_public_key(&StacksPublicKey::from_private(µblock_privkey)); // send transactions to the mempool - let tip = - SortitionDB::get_canonical_burn_chain_tip(peer.sortdb.as_ref().unwrap().conn()) - .unwrap(); + let tip = SortitionDB::get_canonical_burn_chain_tip( + peer.chain.sortdb.as_ref().unwrap().conn(), + ) + .unwrap(); assert_eq!( tip.block_height, @@ -990,7 +996,7 @@ mod test { StacksAddress::from_string("ST1H1B54MY50RMBRRKS7GV2ZWG79RZ1RQ1ETW4E01").unwrap(); // build microblock stream iteratively, and test balances at each additional microblock - let sortdb = peer.sortdb.take().unwrap(); + let sortdb = peer.chain.sortdb.take().unwrap(); let microblocks = { let sort_iconn = sortdb .index_handle_at_block(peer.chainstate(), &canonical_tip) @@ -1055,7 +1061,7 @@ mod test { } microblocks }; - peer.sortdb = Some(sortdb); + peer.chain.sortdb = Some(sortdb); // store microblock stream for (i, mblock) in microblocks.into_iter().enumerate() { @@ -1068,7 +1074,7 @@ mod test { .unwrap(); // process microblock stream to generate unconfirmed state - let sortdb = peer.sortdb.take().unwrap(); + let sortdb = peer.chain.sortdb.take().unwrap(); peer.chainstate() .reload_unconfirmed_state(&sortdb.index_handle_at_tip(), canonical_tip.clone()) .unwrap(); @@ -1087,7 +1093,7 @@ mod test { ) .unwrap() .unwrap(); - peer.sortdb = Some(sortdb); + peer.chain.sortdb = Some(sortdb); // move 100 ustx per round -- 10 per mblock assert_eq!( @@ -1097,7 +1103,7 @@ mod test { let (canonical_burn, canonical_block) = SortitionDB::get_canonical_stacks_chain_tip_hash(peer.sortdb().conn()).unwrap(); - let sortdb = peer.sortdb.take().unwrap(); + let sortdb = peer.chain.sortdb.take().unwrap(); let confirmed_recv_balance = peer .chainstate() .with_read_only_clarity_tx( @@ -1112,7 +1118,7 @@ mod test { }, ) .unwrap(); - peer.sortdb = Some(sortdb); + peer.chain.sortdb = Some(sortdb); assert_eq!( confirmed_recv_balance.amount_unlocked(), @@ -1136,25 +1142,27 @@ mod test { let initial_balance = 1000000000; let mut peer_config = TestPeerConfig::new(function_name!(), 7004, 7005); - peer_config.initial_balances = vec![(addr.to_account_principal(), initial_balance)]; - peer_config.epochs = Some(EpochList::new(&[StacksEpoch { + peer_config.chain_config.initial_balances = + vec![(addr.to_account_principal(), initial_balance)]; + peer_config.chain_config.epochs = Some(EpochList::new(&[StacksEpoch { epoch_id: StacksEpochId::Epoch20, start_height: 0, end_height: (i64::MAX) as u64, block_limit: BLOCK_LIMIT_MAINNET_20, network_epoch: PEER_VERSION_EPOCH_2_0, 
}])); - let burnchain = peer_config.burnchain.clone(); + let burnchain = peer_config.chain_config.burnchain.clone(); let mut peer = TestPeer::new(peer_config); - let chainstate_path = peer.chainstate_path.clone(); + let chainstate_path = peer.chain.chainstate_path.clone(); let num_blocks = 5; let num_microblocks = 3; let first_stacks_block_height = { - let tip = - SortitionDB::get_canonical_burn_chain_tip(peer.sortdb.as_ref().unwrap().conn()) - .unwrap(); + let tip = SortitionDB::get_canonical_burn_chain_tip( + peer.chain.sortdb.as_ref().unwrap().conn(), + ) + .unwrap(); tip.block_height }; @@ -1170,9 +1178,10 @@ mod test { Hash160::from_node_public_key(&StacksPublicKey::from_private(µblock_privkey)); // send transactions to the mempool - let tip = - SortitionDB::get_canonical_burn_chain_tip(peer.sortdb.as_ref().unwrap().conn()) - .unwrap(); + let tip = SortitionDB::get_canonical_burn_chain_tip( + peer.chain.sortdb.as_ref().unwrap().conn(), + ) + .unwrap(); assert_eq!( tip.block_height, @@ -1284,8 +1293,8 @@ mod test { &stacks_block.block_hash(), ); - let mut sortdb = peer.sortdb.take().unwrap(); - let mut inner_node = peer.stacks_node.take().unwrap(); + let mut sortdb = peer.chain.sortdb.take().unwrap(); + let mut inner_node = peer.chain.stacks_node.take().unwrap(); for i in 0..num_microblocks { Relayer::refresh_unconfirmed(&mut inner_node.chainstate, &mut sortdb); @@ -1368,8 +1377,8 @@ mod test { .unwrap(); } - peer.sortdb = Some(sortdb); - peer.stacks_node = Some(inner_node); + peer.chain.sortdb = Some(sortdb); + peer.chain.stacks_node = Some(inner_node); } let (consensus_hash, canonical_block) = @@ -1378,7 +1387,7 @@ mod test { StacksBlockHeader::make_index_block_hash(&consensus_hash, &canonical_block); // process microblock stream to generate unconfirmed state - let sortdb = peer.sortdb.take().unwrap(); + let sortdb = peer.chain.sortdb.take().unwrap(); let iconn = sortdb .index_handle_at_block(peer.chainstate(), &canonical_tip) .unwrap(); @@ -1397,7 +1406,7 @@ mod test { }) .unwrap() .unwrap(); - peer.sortdb = Some(sortdb); + peer.chain.sortdb = Some(sortdb); // all valid txs were processed assert_eq!(db_recv_balance.amount_unlocked(), recv_balance); diff --git a/stackslib/src/chainstate/stacks/tests/accounting.rs b/stackslib/src/chainstate/stacks/tests/accounting.rs index 73e3072a074..b81e77ccb1e 100644 --- a/stackslib/src/chainstate/stacks/tests/accounting.rs +++ b/stackslib/src/chainstate/stacks/tests/accounting.rs @@ -61,7 +61,7 @@ fn test_bad_microblock_fees_pre_v210() { .unwrap(); let mut peer_config = TestPeerConfig::new(function_name!(), 2018, 2019); - peer_config.initial_balances = vec![ + peer_config.chain_config.initial_balances = vec![ (addr.to_account_principal(), 1000000000), (addr_anchored.to_account_principal(), 1000000000), ]; @@ -95,8 +95,8 @@ fn test_bad_microblock_fees_pre_v210() { network_epoch: PEER_VERSION_EPOCH_2_05, }, ]); - peer_config.epochs = Some(epochs); - let burnchain = peer_config.burnchain.clone(); + peer_config.chain_config.epochs = Some(epochs); + let burnchain = peer_config.chain_config.burnchain.clone(); let num_blocks = 10; let mut anchored_sender_nonce = 0; @@ -109,11 +109,12 @@ fn test_bad_microblock_fees_pre_v210() { let mut peer = TestPeer::new(peer_config); - let chainstate_path = peer.chainstate_path.clone(); + let chainstate_path = peer.chain.chainstate_path.clone(); let first_stacks_block_height = { - let sn = SortitionDB::get_canonical_burn_chain_tip(peer.sortdb.as_ref().unwrap().conn()) - .unwrap(); + let sn = + 
SortitionDB::get_canonical_burn_chain_tip(peer.chain.sortdb.as_ref().unwrap().conn()) + .unwrap(); sn.block_height }; @@ -123,8 +124,9 @@ fn test_bad_microblock_fees_pre_v210() { let mut block_ids = vec![]; for tenure_id in 0..num_blocks { // send transactions to the mempool - let tip = SortitionDB::get_canonical_burn_chain_tip(peer.sortdb.as_ref().unwrap().conn()) - .unwrap(); + let tip = + SortitionDB::get_canonical_burn_chain_tip(peer.chain.sortdb.as_ref().unwrap().conn()) + .unwrap(); let acct = get_stacks_account(&mut peer, &addr.to_account_principal()); @@ -377,11 +379,11 @@ fn test_bad_microblock_fees_fix_transition() { .unwrap(); let mut peer_config = TestPeerConfig::new(function_name!(), 2020, 2021); - peer_config.initial_balances = vec![ + peer_config.chain_config.initial_balances = vec![ (addr.to_account_principal(), 1000000000), (addr_anchored.to_account_principal(), 1000000000), ]; - let burnchain = peer_config.burnchain.clone(); + let burnchain = peer_config.chain_config.burnchain.clone(); let epochs = EpochList::new(&[ StacksEpoch { @@ -419,7 +421,7 @@ fn test_bad_microblock_fees_fix_transition() { network_epoch: PEER_VERSION_EPOCH_2_1, }, ]); - peer_config.epochs = Some(epochs); + peer_config.chain_config.epochs = Some(epochs); let num_blocks = 10; let mut anchored_sender_nonce = 0; @@ -432,11 +434,12 @@ fn test_bad_microblock_fees_fix_transition() { let mut peer = TestPeer::new(peer_config); - let chainstate_path = peer.chainstate_path.clone(); + let chainstate_path = peer.chain.chainstate_path.clone(); let first_stacks_block_height = { - let sn = SortitionDB::get_canonical_burn_chain_tip(peer.sortdb.as_ref().unwrap().conn()) - .unwrap(); + let sn = + SortitionDB::get_canonical_burn_chain_tip(peer.chain.sortdb.as_ref().unwrap().conn()) + .unwrap(); sn.block_height }; @@ -446,8 +449,9 @@ fn test_bad_microblock_fees_fix_transition() { let mut block_ids = vec![]; for tenure_id in 0..num_blocks { // send transactions to the mempool - let tip = SortitionDB::get_canonical_burn_chain_tip(peer.sortdb.as_ref().unwrap().conn()) - .unwrap(); + let tip = + SortitionDB::get_canonical_burn_chain_tip(peer.chain.sortdb.as_ref().unwrap().conn()) + .unwrap(); let acct = get_stacks_account(&mut peer, &addr.to_account_principal()); @@ -734,11 +738,11 @@ fn test_get_block_info_v210() { .unwrap(); let mut peer_config = TestPeerConfig::new(function_name!(), 2022, 2023); - peer_config.initial_balances = vec![ + peer_config.chain_config.initial_balances = vec![ (addr.to_account_principal(), 1000000000), (addr_anchored.to_account_principal(), 1000000000), ]; - let burnchain = peer_config.burnchain.clone(); + let burnchain = peer_config.chain_config.burnchain.clone(); let epochs = EpochList::new(&[ StacksEpoch { @@ -776,7 +780,7 @@ fn test_get_block_info_v210() { network_epoch: PEER_VERSION_EPOCH_2_1, }, ]); - peer_config.epochs = Some(epochs); + peer_config.chain_config.epochs = Some(epochs); let num_blocks = 10; let mut anchored_sender_nonce = 0; @@ -789,11 +793,12 @@ fn test_get_block_info_v210() { let mut peer = TestPeer::new(peer_config); - let chainstate_path = peer.chainstate_path.clone(); + let chainstate_path = peer.chain.chainstate_path.clone(); let first_stacks_block_height = { - let sn = SortitionDB::get_canonical_burn_chain_tip(peer.sortdb.as_ref().unwrap().conn()) - .unwrap(); + let sn = + SortitionDB::get_canonical_burn_chain_tip(peer.chain.sortdb.as_ref().unwrap().conn()) + .unwrap(); sn.block_height }; @@ -802,8 +807,9 @@ fn test_get_block_info_v210() { for tenure_id in 
0..num_blocks { // send transactions to the mempool - let tip = SortitionDB::get_canonical_burn_chain_tip(peer.sortdb.as_ref().unwrap().conn()) - .unwrap(); + let tip = + SortitionDB::get_canonical_burn_chain_tip(peer.chain.sortdb.as_ref().unwrap().conn()) + .unwrap(); let acct = get_stacks_account(&mut peer, &addr.to_account_principal()); @@ -996,7 +1002,7 @@ fn test_get_block_info_v210() { } for i in 0..num_blocks { - let sortdb = peer.sortdb.take().unwrap(); + let sortdb = peer.chain.sortdb.take().unwrap(); let (consensus_hash, block_bhh) = SortitionDB::get_canonical_stacks_chain_tip_hash(sortdb.conn()).unwrap(); let stacks_block_id = StacksBlockHeader::make_index_block_hash(&consensus_hash, &block_bhh); @@ -1073,7 +1079,7 @@ fn test_get_block_info_v210() { ) .unwrap(); - peer.sortdb = Some(sortdb); + peer.chain.sortdb = Some(sortdb); } } @@ -1105,11 +1111,11 @@ fn test_get_block_info_v210_no_microblocks() { .unwrap(); let mut peer_config = TestPeerConfig::new(function_name!(), 2022, 2023); - peer_config.initial_balances = vec![ + peer_config.chain_config.initial_balances = vec![ (addr.to_account_principal(), 1000000000), (addr_anchored.to_account_principal(), 1000000000), ]; - let burnchain = peer_config.burnchain.clone(); + let burnchain = peer_config.chain_config.burnchain.clone(); let epochs = EpochList::new(&[ StacksEpoch { @@ -1147,7 +1153,7 @@ fn test_get_block_info_v210_no_microblocks() { network_epoch: PEER_VERSION_EPOCH_2_1, }, ]); - peer_config.epochs = Some(epochs); + peer_config.chain_config.epochs = Some(epochs); let num_blocks = 10; let mut anchored_sender_nonce = 0; @@ -1160,11 +1166,12 @@ fn test_get_block_info_v210_no_microblocks() { let mut peer = TestPeer::new(peer_config); - let chainstate_path = peer.chainstate_path.clone(); + let chainstate_path = peer.chain.chainstate_path.clone(); let first_stacks_block_height = { - let sn = SortitionDB::get_canonical_burn_chain_tip(peer.sortdb.as_ref().unwrap().conn()) - .unwrap(); + let sn = + SortitionDB::get_canonical_burn_chain_tip(peer.chain.sortdb.as_ref().unwrap().conn()) + .unwrap(); sn.block_height }; @@ -1173,8 +1180,9 @@ fn test_get_block_info_v210_no_microblocks() { for tenure_id in 0..num_blocks { // send transactions to the mempool - let tip = SortitionDB::get_canonical_burn_chain_tip(peer.sortdb.as_ref().unwrap().conn()) - .unwrap(); + let tip = + SortitionDB::get_canonical_burn_chain_tip(peer.chain.sortdb.as_ref().unwrap().conn()) + .unwrap(); let acct = get_stacks_account(&mut peer, &addr.to_account_principal()); @@ -1299,7 +1307,7 @@ fn test_get_block_info_v210_no_microblocks() { } for i in 0..num_blocks { - let sortdb = peer.sortdb.take().unwrap(); + let sortdb = peer.chain.sortdb.take().unwrap(); let (consensus_hash, block_bhh) = SortitionDB::get_canonical_stacks_chain_tip_hash(sortdb.conn()).unwrap(); let stacks_block_id = StacksBlockHeader::make_index_block_hash(&consensus_hash, &block_bhh); @@ -1364,7 +1372,7 @@ fn test_get_block_info_v210_no_microblocks() { ) .unwrap(); - peer.sortdb = Some(sortdb); + peer.chain.sortdb = Some(sortdb); } } @@ -1425,7 +1433,7 @@ fn test_coinbase_pay_to_alt_recipient_v210(pay_to_contract: bool) { 2024, 2025, ); - peer_config.initial_balances = vec![ + peer_config.chain_config.initial_balances = vec![ (addr.to_account_principal(), 1000000000), (addr_anchored.to_account_principal(), 1000000000), ]; @@ -1466,8 +1474,8 @@ fn test_coinbase_pay_to_alt_recipient_v210(pay_to_contract: bool) { network_epoch: PEER_VERSION_EPOCH_2_1, }, ]); - peer_config.epochs = Some(epochs); - let 
burnchain = peer_config.burnchain.clone(); + peer_config.chain_config.epochs = Some(epochs); + let burnchain = peer_config.chain_config.burnchain.clone(); let num_blocks = 10; let mut anchored_sender_nonce = 0; @@ -1480,11 +1488,12 @@ fn test_coinbase_pay_to_alt_recipient_v210(pay_to_contract: bool) { let mut peer = TestPeer::new(peer_config); - let chainstate_path = peer.chainstate_path.clone(); + let chainstate_path = peer.chain.chainstate_path.clone(); let first_stacks_block_height = { - let sn = SortitionDB::get_canonical_burn_chain_tip(peer.sortdb.as_ref().unwrap().conn()) - .unwrap(); + let sn = + SortitionDB::get_canonical_burn_chain_tip(peer.chain.sortdb.as_ref().unwrap().conn()) + .unwrap(); sn.block_height }; @@ -1499,8 +1508,9 @@ fn test_coinbase_pay_to_alt_recipient_v210(pay_to_contract: bool) { for tenure_id in 0..num_blocks { // send transactions to the mempool - let tip = SortitionDB::get_canonical_burn_chain_tip(peer.sortdb.as_ref().unwrap().conn()) - .unwrap(); + let tip = + SortitionDB::get_canonical_burn_chain_tip(peer.chain.sortdb.as_ref().unwrap().conn()) + .unwrap(); let acct = get_stacks_account(&mut peer, &addr.to_account_principal()); @@ -1764,7 +1774,7 @@ fn test_coinbase_pay_to_alt_recipient_v210(pay_to_contract: bool) { let mut recipient_total_reward = 0; for i in 0..num_blocks { - let sortdb = peer.sortdb.take().unwrap(); + let sortdb = peer.chain.sortdb.take().unwrap(); let (consensus_hash, block_bhh) = SortitionDB::get_canonical_stacks_chain_tip_hash(sortdb.conn()).unwrap(); let stacks_block_id = StacksBlockHeader::make_index_block_hash(&consensus_hash, &block_bhh); @@ -1867,11 +1877,11 @@ fn test_coinbase_pay_to_alt_recipient_v210(pay_to_contract: bool) { ) .unwrap(); - peer.sortdb = Some(sortdb); + peer.chain.sortdb = Some(sortdb); } // finally, verify that the alt. 
recipient got all the coinbases except the first one - let sortdb = peer.sortdb.take().unwrap(); + let sortdb = peer.chain.sortdb.take().unwrap(); let (consensus_hash, block_bhh) = SortitionDB::get_canonical_stacks_chain_tip_hash(sortdb.conn()).unwrap(); let stacks_block_id = StacksBlockHeader::make_index_block_hash(&consensus_hash, &block_bhh); diff --git a/stackslib/src/chainstate/stacks/tests/block_construction.rs b/stackslib/src/chainstate/stacks/tests/block_construction.rs index 87faa6f6043..d1a17b7c39f 100644 --- a/stackslib/src/chainstate/stacks/tests/block_construction.rs +++ b/stackslib/src/chainstate/stacks/tests/block_construction.rs @@ -58,23 +58,25 @@ use crate::cost_estimates::UnitEstimator; #[test] fn test_build_anchored_blocks_empty() { let peer_config = TestPeerConfig::new(function_name!(), 2000, 2001); - let burnchain = peer_config.burnchain.clone(); + let burnchain = peer_config.chain_config.burnchain.clone(); let mut peer = TestPeer::new(peer_config); - let chainstate_path = peer.chainstate_path.clone(); + let chainstate_path = peer.chain.chainstate_path.clone(); let num_blocks = 10; let first_stacks_block_height = { - let sn = SortitionDB::get_canonical_burn_chain_tip(peer.sortdb.as_ref().unwrap().conn()) - .unwrap(); + let sn = + SortitionDB::get_canonical_burn_chain_tip(peer.chain.sortdb.as_ref().unwrap().conn()) + .unwrap(); sn.block_height }; let mut last_block: Option = None; for tenure_id in 0..num_blocks { // send transactions to the mempool - let tip = SortitionDB::get_canonical_burn_chain_tip(peer.sortdb.as_ref().unwrap().conn()) - .unwrap(); + let tip = + SortitionDB::get_canonical_burn_chain_tip(peer.chain.sortdb.as_ref().unwrap().conn()) + .unwrap(); assert_eq!( tip.block_height, @@ -157,17 +159,18 @@ fn test_build_anchored_blocks_stx_transfers_single() { .unwrap(); let mut peer_config = TestPeerConfig::new(function_name!(), 2002, 2003); - peer_config.initial_balances = vec![(addr.to_account_principal(), 1000000000)]; - let burnchain = peer_config.burnchain.clone(); + peer_config.chain_config.initial_balances = vec![(addr.to_account_principal(), 1000000000)]; + let burnchain = peer_config.chain_config.burnchain.clone(); let mut peer = TestPeer::new(peer_config); - let chainstate_path = peer.chainstate_path.clone(); + let chainstate_path = peer.chain.chainstate_path.clone(); let num_blocks = 10; let first_stacks_block_height = { - let sn = SortitionDB::get_canonical_burn_chain_tip(peer.sortdb.as_ref().unwrap().conn()) - .unwrap(); + let sn = + SortitionDB::get_canonical_burn_chain_tip(peer.chain.sortdb.as_ref().unwrap().conn()) + .unwrap(); sn.block_height }; @@ -177,8 +180,9 @@ fn test_build_anchored_blocks_stx_transfers_single() { for tenure_id in 0..num_blocks { // send transactions to the mempool - let tip = SortitionDB::get_canonical_burn_chain_tip(peer.sortdb.as_ref().unwrap().conn()) - .unwrap(); + let tip = + SortitionDB::get_canonical_burn_chain_tip(peer.chain.sortdb.as_ref().unwrap().conn()) + .unwrap(); let (burn_ops, stacks_block, microblocks) = peer.make_tenure( |ref mut miner, @@ -290,17 +294,18 @@ fn test_build_anchored_blocks_empty_with_builder_timeout() { .unwrap(); let mut peer_config = TestPeerConfig::new(function_name!(), 2022, 2023); - peer_config.initial_balances = vec![(addr.to_account_principal(), 1000000000)]; - let burnchain = peer_config.burnchain.clone(); + peer_config.chain_config.initial_balances = vec![(addr.to_account_principal(), 1000000000)]; + let burnchain = peer_config.chain_config.burnchain.clone(); let mut peer = 
TestPeer::new(peer_config); - let chainstate_path = peer.chainstate_path.clone(); + let chainstate_path = peer.chain.chainstate_path.clone(); let num_blocks = 10; let first_stacks_block_height = { - let sn = SortitionDB::get_canonical_burn_chain_tip(peer.sortdb.as_ref().unwrap().conn()) - .unwrap(); + let sn = + SortitionDB::get_canonical_burn_chain_tip(peer.chain.sortdb.as_ref().unwrap().conn()) + .unwrap(); sn.block_height }; @@ -310,8 +315,9 @@ fn test_build_anchored_blocks_empty_with_builder_timeout() { for tenure_id in 0..num_blocks { // send transactions to the mempool - let tip = SortitionDB::get_canonical_burn_chain_tip(peer.sortdb.as_ref().unwrap().conn()) - .unwrap(); + let tip = + SortitionDB::get_canonical_burn_chain_tip(peer.chain.sortdb.as_ref().unwrap().conn()) + .unwrap(); let (burn_ops, stacks_block, microblocks) = peer.make_tenure( |ref mut miner, @@ -426,16 +432,17 @@ fn test_build_anchored_blocks_stx_transfers_multi() { } let mut peer_config = TestPeerConfig::new(function_name!(), 2004, 2005); - peer_config.initial_balances = balances; - let burnchain = peer_config.burnchain.clone(); + peer_config.chain_config.initial_balances = balances; + let burnchain = peer_config.chain_config.burnchain.clone(); let mut peer = TestPeer::new(peer_config); - let chainstate_path = peer.chainstate_path.clone(); + let chainstate_path = peer.chain.chainstate_path.clone(); let first_stacks_block_height = { - let sn = SortitionDB::get_canonical_burn_chain_tip(peer.sortdb.as_ref().unwrap().conn()) - .unwrap(); + let sn = + SortitionDB::get_canonical_burn_chain_tip(peer.chain.sortdb.as_ref().unwrap().conn()) + .unwrap(); sn.block_height }; @@ -445,8 +452,9 @@ fn test_build_anchored_blocks_stx_transfers_multi() { for tenure_id in 0..num_blocks { // send transactions to the mempool - let tip = SortitionDB::get_canonical_burn_chain_tip(peer.sortdb.as_ref().unwrap().conn()) - .unwrap(); + let tip = + SortitionDB::get_canonical_burn_chain_tip(peer.chain.sortdb.as_ref().unwrap().conn()) + .unwrap(); let (burn_ops, stacks_block, microblocks) = peer.make_tenure( |ref mut miner, @@ -586,8 +594,8 @@ fn test_build_anchored_blocks_connected_by_microblocks_across_epoch() { .unwrap(); let mut peer_config = TestPeerConfig::new(function_name!(), 2016, 2017); - peer_config.initial_balances = vec![(addr.to_account_principal(), 1000000000)]; - let burnchain = peer_config.burnchain.clone(); + peer_config.chain_config.initial_balances = vec![(addr.to_account_principal(), 1000000000)]; + let burnchain = peer_config.chain_config.burnchain.clone(); let epochs = EpochList::new(&[ StacksEpoch { @@ -618,7 +626,7 @@ fn test_build_anchored_blocks_connected_by_microblocks_across_epoch() { network_epoch: PEER_VERSION_EPOCH_2_05, }, ]); - peer_config.epochs = Some(epochs); + peer_config.chain_config.epochs = Some(epochs); let num_blocks = 10; @@ -630,11 +638,12 @@ fn test_build_anchored_blocks_connected_by_microblocks_across_epoch() { let mut peer = TestPeer::new(peer_config); - let chainstate_path = peer.chainstate_path.clone(); + let chainstate_path = peer.chain.chainstate_path.clone(); let first_stacks_block_height = { - let sn = SortitionDB::get_canonical_burn_chain_tip(peer.sortdb.as_ref().unwrap().conn()) - .unwrap(); + let sn = + SortitionDB::get_canonical_burn_chain_tip(peer.chain.sortdb.as_ref().unwrap().conn()) + .unwrap(); sn.block_height }; @@ -644,8 +653,9 @@ fn test_build_anchored_blocks_connected_by_microblocks_across_epoch() { let mut last_block = None; for tenure_id in 0..num_blocks { // send transactions 
to the mempool - let tip = SortitionDB::get_canonical_burn_chain_tip(peer.sortdb.as_ref().unwrap().conn()) - .unwrap(); + let tip = + SortitionDB::get_canonical_burn_chain_tip(peer.chain.sortdb.as_ref().unwrap().conn()) + .unwrap(); let acct = get_stacks_account(&mut peer, &addr.to_account_principal()); @@ -822,8 +832,8 @@ fn test_build_anchored_blocks_connected_by_microblocks_across_epoch_invalid() { .unwrap(); let mut peer_config = TestPeerConfig::new(function_name!(), 2018, 2019); - peer_config.initial_balances = vec![(addr.to_account_principal(), 1000000000)]; - let burnchain = peer_config.burnchain.clone(); + peer_config.chain_config.initial_balances = vec![(addr.to_account_principal(), 1000000000)]; + let burnchain = peer_config.chain_config.burnchain.clone(); let epochs = EpochList::new(&[ StacksEpoch { @@ -854,7 +864,7 @@ fn test_build_anchored_blocks_connected_by_microblocks_across_epoch_invalid() { network_epoch: PEER_VERSION_EPOCH_2_05, }, ]); - peer_config.epochs = Some(epochs); + peer_config.chain_config.epochs = Some(epochs); let num_blocks = 10; @@ -866,11 +876,12 @@ fn test_build_anchored_blocks_connected_by_microblocks_across_epoch_invalid() { let mut peer = TestPeer::new(peer_config); - let chainstate_path = peer.chainstate_path.clone(); + let chainstate_path = peer.chain.chainstate_path.clone(); let first_stacks_block_height = { - let sn = SortitionDB::get_canonical_burn_chain_tip(peer.sortdb.as_ref().unwrap().conn()) - .unwrap(); + let sn = + SortitionDB::get_canonical_burn_chain_tip(peer.chain.sortdb.as_ref().unwrap().conn()) + .unwrap(); sn.block_height }; @@ -882,8 +893,9 @@ fn test_build_anchored_blocks_connected_by_microblocks_across_epoch_invalid() { for tenure_id in 0..num_blocks { // send transactions to the mempool - let tip = SortitionDB::get_canonical_burn_chain_tip(peer.sortdb.as_ref().unwrap().conn()) - .unwrap(); + let tip = + SortitionDB::get_canonical_burn_chain_tip(peer.chain.sortdb.as_ref().unwrap().conn()) + .unwrap(); let acct = get_stacks_account(&mut peer, &addr.to_account_principal()); @@ -1106,7 +1118,7 @@ fn test_build_anchored_blocks_connected_by_microblocks_across_epoch_invalid() { } last_block_ch = Some( - SortitionDB::get_canonical_burn_chain_tip(peer.sortdb.as_ref().unwrap().conn()) + SortitionDB::get_canonical_burn_chain_tip(peer.chain.sortdb.as_ref().unwrap().conn()) .unwrap() .consensus_hash, ); @@ -1163,8 +1175,8 @@ fn test_build_anchored_blocks_skip_too_expensive() { initial_balances.push((addr_extra.to_account_principal(), 200000000000)); let mut peer_config = TestPeerConfig::new(function_name!(), 2006, 2007); - peer_config.initial_balances = initial_balances; - peer_config.epochs = Some(EpochList::new(&[StacksEpoch { + peer_config.chain_config.initial_balances = initial_balances; + peer_config.chain_config.epochs = Some(EpochList::new(&[StacksEpoch { epoch_id: StacksEpochId::Epoch20, start_height: 0, end_height: i64::MAX as u64, @@ -1179,15 +1191,16 @@ fn test_build_anchored_blocks_skip_too_expensive() { }, network_epoch: PEER_VERSION_EPOCH_2_0, }])); - let burnchain = peer_config.burnchain.clone(); + let burnchain = peer_config.chain_config.burnchain.clone(); let mut peer = TestPeer::new(peer_config); - let chainstate_path = peer.chainstate_path.clone(); + let chainstate_path = peer.chain.chainstate_path.clone(); let first_stacks_block_height = { - let sn = SortitionDB::get_canonical_burn_chain_tip(peer.sortdb.as_ref().unwrap().conn()) - .unwrap(); + let sn = + 
SortitionDB::get_canonical_burn_chain_tip(peer.chain.sortdb.as_ref().unwrap().conn()) + .unwrap(); sn.block_height }; @@ -1197,8 +1210,9 @@ fn test_build_anchored_blocks_skip_too_expensive() { for tenure_id in 0..num_blocks { // send transactions to the mempool - let tip = SortitionDB::get_canonical_burn_chain_tip(peer.sortdb.as_ref().unwrap().conn()) - .unwrap(); + let tip = + SortitionDB::get_canonical_burn_chain_tip(peer.chain.sortdb.as_ref().unwrap().conn()) + .unwrap(); let (burn_ops, stacks_block, microblocks) = peer.make_tenure( |ref mut miner, @@ -1238,7 +1252,7 @@ fn test_build_anchored_blocks_skip_too_expensive() { if tenure_id > 0 { let mut expensive_part = vec![]; for i in 0..100 { - expensive_part.push(format!("(define-data-var var-{} int 0)", i)); + expensive_part.push(format!("(define-data-var var-{i} int 0)")); } let contract = format!( "{} @@ -1365,18 +1379,18 @@ fn test_build_anchored_blocks_mempool_fee_transaction_too_low() { .unwrap(); let mut peer_config = TestPeerConfig::new(function_name!(), 2032, 2033); - peer_config.initial_balances = vec![(addr.to_account_principal(), 1000000000)]; - let burnchain = peer_config.burnchain.clone(); + peer_config.chain_config.initial_balances = vec![(addr.to_account_principal(), 1000000000)]; + let burnchain = peer_config.chain_config.burnchain.clone(); let mut peer = TestPeer::new(peer_config); - let chainstate_path = peer.chainstate_path.clone(); + let chainstate_path = peer.chain.chainstate_path.clone(); let recipient_addr_str = "ST1RFD5Q2QPK3E0F08HG9XDX7SSC7CNRS0QR0SGEV"; let recipient = StacksAddress::from_string(recipient_addr_str).unwrap(); - let tip = - SortitionDB::get_canonical_burn_chain_tip(peer.sortdb.as_ref().unwrap().conn()).unwrap(); + let tip = SortitionDB::get_canonical_burn_chain_tip(peer.chain.sortdb.as_ref().unwrap().conn()) + .unwrap(); let (burn_ops, stacks_block, microblocks) = peer.make_tenure( |ref mut miner, @@ -1485,18 +1499,18 @@ fn test_build_anchored_blocks_zero_fee_transaction() { .unwrap(); let mut peer_config = TestPeerConfig::new(function_name!(), 2032, 2033); - peer_config.initial_balances = vec![(addr.to_account_principal(), 1000000000)]; - let burnchain = peer_config.burnchain.clone(); + peer_config.chain_config.initial_balances = vec![(addr.to_account_principal(), 1000000000)]; + let burnchain = peer_config.chain_config.burnchain.clone(); let mut peer = TestPeer::new(peer_config); - let chainstate_path = peer.chainstate_path.clone(); + let chainstate_path = peer.chain.chainstate_path.clone(); let recipient_addr_str = "ST1RFD5Q2QPK3E0F08HG9XDX7SSC7CNRS0QR0SGEV"; let recipient = StacksAddress::from_string(recipient_addr_str).unwrap(); - let tip = - SortitionDB::get_canonical_burn_chain_tip(peer.sortdb.as_ref().unwrap().conn()).unwrap(); + let tip = SortitionDB::get_canonical_burn_chain_tip(peer.chain.sortdb.as_ref().unwrap().conn()) + .unwrap(); let (burn_ops, stacks_block, microblocks) = peer.make_tenure( |ref mut miner, @@ -1590,12 +1604,12 @@ fn test_build_anchored_blocks_multiple_chaintips() { } let mut peer_config = TestPeerConfig::new(function_name!(), 2008, 2009); - peer_config.initial_balances = balances; - let burnchain = peer_config.burnchain.clone(); + peer_config.chain_config.initial_balances = balances; + let burnchain = peer_config.chain_config.burnchain.clone(); let mut peer = TestPeer::new(peer_config); - let chainstate_path = peer.chainstate_path.clone(); + let chainstate_path = peer.chain.chainstate_path.clone(); // make a blank chainstate and mempool so we can mine empty blocks 
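// Illustrative note on the formatting hunks nearby: alongside the field re-pathing,
// several call sites switch to Rust 2021 inline format arguments, e.g.
// `format!("(define-data-var var-{i} int 0)")` and `eprintln!("Start tenure {tenure_id:?}")`.
// The captured-identifier form produces the same output as the positional form:
fn main() {
    let i = 3;
    let positional = format!("(define-data-var var-{} int 0)", i);
    let inline = format!("(define-data-var var-{i} int 0)");
    assert_eq!(positional, inline);
}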
// without punishing the correspondingly "too expensive" transactions @@ -1603,15 +1617,17 @@ fn test_build_anchored_blocks_multiple_chaintips() { let mut blank_mempool = MemPoolDB::open_test(false, 1, &blank_chainstate.root_path).unwrap(); let first_stacks_block_height = { - let sn = SortitionDB::get_canonical_burn_chain_tip(peer.sortdb.as_ref().unwrap().conn()) - .unwrap(); + let sn = + SortitionDB::get_canonical_burn_chain_tip(peer.chain.sortdb.as_ref().unwrap().conn()) + .unwrap(); sn.block_height }; for tenure_id in 0..num_blocks { // send transactions to the mempool - let tip = SortitionDB::get_canonical_burn_chain_tip(peer.sortdb.as_ref().unwrap().conn()) - .unwrap(); + let tip = + SortitionDB::get_canonical_burn_chain_tip(peer.chain.sortdb.as_ref().unwrap().conn()) + .unwrap(); let (burn_ops, stacks_block, microblocks) = peer.make_tenure( |ref mut miner, @@ -1734,23 +1750,25 @@ fn test_build_anchored_blocks_empty_chaintips() { } let mut peer_config = TestPeerConfig::new(function_name!(), 2010, 2011); - peer_config.initial_balances = balances; - let burnchain = peer_config.burnchain.clone(); + peer_config.chain_config.initial_balances = balances; + let burnchain = peer_config.chain_config.burnchain.clone(); let mut peer = TestPeer::new(peer_config); - let chainstate_path = peer.chainstate_path.clone(); + let chainstate_path = peer.chain.chainstate_path.clone(); let first_stacks_block_height = { - let sn = SortitionDB::get_canonical_burn_chain_tip(peer.sortdb.as_ref().unwrap().conn()) - .unwrap(); + let sn = + SortitionDB::get_canonical_burn_chain_tip(peer.chain.sortdb.as_ref().unwrap().conn()) + .unwrap(); sn.block_height }; for tenure_id in 0..num_blocks { // send transactions to the mempool - let tip = SortitionDB::get_canonical_burn_chain_tip(peer.sortdb.as_ref().unwrap().conn()) - .unwrap(); + let tip = + SortitionDB::get_canonical_burn_chain_tip(peer.chain.sortdb.as_ref().unwrap().conn()) + .unwrap(); let (burn_ops, stacks_block, microblocks) = peer.make_tenure( |ref mut miner, @@ -1874,23 +1892,25 @@ fn test_build_anchored_blocks_too_expensive_transactions() { } let mut peer_config = TestPeerConfig::new(function_name!(), 2013, 2014); - peer_config.initial_balances = balances; - let burnchain = peer_config.burnchain.clone(); + peer_config.chain_config.initial_balances = balances; + let burnchain = peer_config.chain_config.burnchain.clone(); let mut peer = TestPeer::new(peer_config); - let chainstate_path = peer.chainstate_path.clone(); + let chainstate_path = peer.chain.chainstate_path.clone(); let first_stacks_block_height = { - let sn = SortitionDB::get_canonical_burn_chain_tip(peer.sortdb.as_ref().unwrap().conn()) - .unwrap(); + let sn = + SortitionDB::get_canonical_burn_chain_tip(peer.chain.sortdb.as_ref().unwrap().conn()) + .unwrap(); sn.block_height }; for tenure_id in 0..num_blocks { // send transactions to the mempool - let tip = SortitionDB::get_canonical_burn_chain_tip(peer.sortdb.as_ref().unwrap().conn()) - .unwrap(); + let tip = + SortitionDB::get_canonical_burn_chain_tip(peer.chain.sortdb.as_ref().unwrap().conn()) + .unwrap(); let (burn_ops, stacks_block, microblocks) = peer.make_tenure( |ref mut miner, @@ -2026,15 +2046,16 @@ fn test_build_anchored_blocks_too_expensive_transactions() { #[test] fn test_build_anchored_blocks_invalid() { let peer_config = TestPeerConfig::new(function_name!(), 2014, 2015); - let burnchain = peer_config.burnchain.clone(); + let burnchain = peer_config.chain_config.burnchain.clone(); let mut peer = TestPeer::new(peer_config); - let 
chainstate_path = peer.chainstate_path.clone(); + let chainstate_path = peer.chain.chainstate_path.clone(); let num_blocks = 10; let first_stacks_block_height = { - let sn = SortitionDB::get_canonical_burn_chain_tip(peer.sortdb.as_ref().unwrap().conn()) - .unwrap(); + let sn = + SortitionDB::get_canonical_burn_chain_tip(peer.chain.sortdb.as_ref().unwrap().conn()) + .unwrap(); sn.block_height }; @@ -2058,7 +2079,7 @@ fn test_build_anchored_blocks_invalid() { for tenure_id in 0..num_blocks { // send transactions to the mempool let mut tip = - SortitionDB::get_canonical_burn_chain_tip(peer.sortdb.as_ref().unwrap().conn()) + SortitionDB::get_canonical_burn_chain_tip(peer.chain.sortdb.as_ref().unwrap().conn()) .unwrap(); if tenure_id == bad_block_ancestor_tenure { @@ -2233,24 +2254,26 @@ fn test_build_anchored_blocks_bad_nonces() { } let mut peer_config = TestPeerConfig::new(function_name!(), 2012, 2013); - peer_config.initial_balances = balances; - let burnchain = peer_config.burnchain.clone(); + peer_config.chain_config.initial_balances = balances; + let burnchain = peer_config.chain_config.burnchain.clone(); let mut peer = TestPeer::new(peer_config); - let chainstate_path = peer.chainstate_path.clone(); + let chainstate_path = peer.chain.chainstate_path.clone(); let first_stacks_block_height = { - let sn = SortitionDB::get_canonical_burn_chain_tip(peer.sortdb.as_ref().unwrap().conn()) - .unwrap(); + let sn = + SortitionDB::get_canonical_burn_chain_tip(peer.chain.sortdb.as_ref().unwrap().conn()) + .unwrap(); sn.block_height }; for tenure_id in 0..num_blocks { - eprintln!("Start tenure {:?}", tenure_id); + eprintln!("Start tenure {tenure_id:?}"); // send transactions to the mempool - let tip = SortitionDB::get_canonical_burn_chain_tip(peer.sortdb.as_ref().unwrap().conn()) - .unwrap(); + let tip = + SortitionDB::get_canonical_burn_chain_tip(peer.chain.sortdb.as_ref().unwrap().conn()) + .unwrap(); let (burn_ops, stacks_block, microblocks) = peer.make_tenure( |ref mut miner, @@ -2482,16 +2505,17 @@ fn test_build_microblock_stream_forks() { } let mut peer_config = TestPeerConfig::new(function_name!(), 2014, 2015); - peer_config.initial_balances = balances; - let burnchain = peer_config.burnchain.clone(); + peer_config.chain_config.initial_balances = balances; + let burnchain = peer_config.chain_config.burnchain.clone(); let mut peer = TestPeer::new(peer_config); - let chainstate_path = peer.chainstate_path.clone(); + let chainstate_path = peer.chain.chainstate_path.clone(); let first_stacks_block_height = { - let sn = SortitionDB::get_canonical_burn_chain_tip(peer.sortdb.as_ref().unwrap().conn()) - .unwrap(); + let sn = + SortitionDB::get_canonical_burn_chain_tip(peer.chain.sortdb.as_ref().unwrap().conn()) + .unwrap(); sn.block_height }; @@ -2500,8 +2524,9 @@ fn test_build_microblock_stream_forks() { for tenure_id in 0..num_blocks { // send transactions to the mempool - let tip = SortitionDB::get_canonical_burn_chain_tip(peer.sortdb.as_ref().unwrap().conn()) - .unwrap(); + let tip = + SortitionDB::get_canonical_burn_chain_tip(peer.chain.sortdb.as_ref().unwrap().conn()) + .unwrap(); let (burn_ops, stacks_block, microblocks) = peer.make_tenure( |ref mut miner, @@ -2780,16 +2805,17 @@ fn test_build_microblock_stream_forks_with_descendants() { } let mut peer_config = TestPeerConfig::new(function_name!(), 2014, 2015); - peer_config.initial_balances = balances; - let burnchain = peer_config.burnchain.clone(); + peer_config.chain_config.initial_balances = balances; + let burnchain = 
peer_config.chain_config.burnchain.clone(); let mut peer = TestPeer::new(peer_config); - let chainstate_path = peer.chainstate_path.clone(); + let chainstate_path = peer.chain.chainstate_path.clone(); let first_stacks_block_height = { - let sn = SortitionDB::get_canonical_burn_chain_tip(peer.sortdb.as_ref().unwrap().conn()) - .unwrap(); + let sn = + SortitionDB::get_canonical_burn_chain_tip(peer.chain.sortdb.as_ref().unwrap().conn()) + .unwrap(); sn.block_height }; @@ -2808,8 +2834,9 @@ fn test_build_microblock_stream_forks_with_descendants() { for tenure_id in 0..num_blocks { // send transactions to the mempool - let tip = SortitionDB::get_canonical_burn_chain_tip(peer.sortdb.as_ref().unwrap().conn()) - .unwrap(); + let tip = + SortitionDB::get_canonical_burn_chain_tip(peer.chain.sortdb.as_ref().unwrap().conn()) + .unwrap(); let (mut burn_ops, stacks_block, microblocks) = peer.make_tenure( |ref mut miner, @@ -3027,7 +3054,7 @@ fn test_build_microblock_stream_forks_with_descendants() { let mblock_pubkey_hash = Hash160::from_node_public_key(&StacksPublicKey::from_private(&mblock_privks[tenure_id])); - test_debug!("Produce tenure {} block off of {}/{}", tenure_id, &parent_consensus_hash, &parent_header_hash); + test_debug!("Produce tenure {tenure_id} block off of {parent_consensus_hash}/{parent_header_hash}"); // force tenures 2 and 3 to mine off of forked siblings deeper than the // detected fork @@ -3249,11 +3276,11 @@ fn test_contract_call_across_clarity_versions() { .unwrap(); let mut peer_config = TestPeerConfig::new(function_name!(), 2024, 2025); - peer_config.initial_balances = vec![ + peer_config.chain_config.initial_balances = vec![ (addr.to_account_principal(), 1000000000), (addr_anchored.to_account_principal(), 1000000000), ]; - let burnchain = peer_config.burnchain.clone(); + let burnchain = peer_config.chain_config.burnchain.clone(); let epochs = EpochList::new(&[ StacksEpoch { @@ -3285,16 +3312,17 @@ fn test_contract_call_across_clarity_versions() { network_epoch: PEER_VERSION_EPOCH_2_1, }, ]); - peer_config.epochs = Some(epochs); + peer_config.chain_config.epochs = Some(epochs); let num_blocks = 10; let mut anchored_sender_nonce = 0; let mut peer = TestPeer::new(peer_config); - let chainstate_path = peer.chainstate_path.clone(); + let chainstate_path = peer.chain.chainstate_path.clone(); let first_stacks_block_height = { - let sn = SortitionDB::get_canonical_burn_chain_tip(peer.sortdb.as_ref().unwrap().conn()) - .unwrap(); + let sn = + SortitionDB::get_canonical_burn_chain_tip(peer.chain.sortdb.as_ref().unwrap().conn()) + .unwrap(); sn.block_height }; @@ -3303,8 +3331,9 @@ fn test_contract_call_across_clarity_versions() { for tenure_id in 0..num_blocks { // send transactions to the mempool - let tip = SortitionDB::get_canonical_burn_chain_tip(peer.sortdb.as_ref().unwrap().conn()) - .unwrap(); + let tip = + SortitionDB::get_canonical_burn_chain_tip(peer.chain.sortdb.as_ref().unwrap().conn()) + .unwrap(); let acct = get_stacks_account(&mut peer, &addr.to_account_principal()); @@ -3706,7 +3735,7 @@ fn test_contract_call_across_clarity_versions() { // all contracts deployed and called the right number of times, indicating that // cross-clarity-version contract calls are doable - let sortdb = peer.sortdb.take().unwrap(); + let sortdb = peer.chain.sortdb.take().unwrap(); let (consensus_hash, block_bhh) = SortitionDB::get_canonical_stacks_chain_tip_hash(sortdb.conn()).unwrap(); let stacks_block_id = StacksBlockHeader::make_index_block_hash(&consensus_hash, &block_bhh); @@ -3819,8 
+3848,8 @@ fn test_is_tx_problematic() { initial_balances.push((addr_extra.to_account_principal(), 200000000000)); let mut peer_config = TestPeerConfig::new(function_name!(), 2018, 2019); - peer_config.initial_balances = initial_balances; - peer_config.epochs = Some(EpochList::new(&[ + peer_config.chain_config.initial_balances = initial_balances; + peer_config.chain_config.epochs = Some(EpochList::new(&[ StacksEpoch { epoch_id: StacksEpochId::Epoch20, start_height: 0, @@ -3836,15 +3865,16 @@ fn test_is_tx_problematic() { network_epoch: PEER_VERSION_EPOCH_2_05, }, ])); - let burnchain = peer_config.burnchain.clone(); + let burnchain = peer_config.chain_config.burnchain.clone(); let mut peer = TestPeer::new(peer_config); - let chainstate_path = peer.chainstate_path.clone(); + let chainstate_path = peer.chain.chainstate_path.clone(); let first_stacks_block_height = { - let sn = SortitionDB::get_canonical_burn_chain_tip(peer.sortdb.as_ref().unwrap().conn()) - .unwrap(); + let sn = + SortitionDB::get_canonical_burn_chain_tip(peer.chain.sortdb.as_ref().unwrap().conn()) + .unwrap(); sn.block_height }; @@ -3854,8 +3884,9 @@ fn test_is_tx_problematic() { let mut last_block = None; for tenure_id in 0..num_blocks { // send transactions to the mempool - let tip = SortitionDB::get_canonical_burn_chain_tip(peer.sortdb.as_ref().unwrap().conn()) - .unwrap(); + let tip = + SortitionDB::get_canonical_burn_chain_tip(peer.chain.sortdb.as_ref().unwrap().conn()) + .unwrap(); let (burn_ops, stacks_block, microblocks) = peer.make_tenure( |ref mut miner, @@ -4292,8 +4323,8 @@ fn mempool_incorporate_pox_unlocks() { let principal = PrincipalData::from(addr.clone()); let mut peer_config = TestPeerConfig::new(function_name!(), 2020, 2021); - peer_config.initial_balances = initial_balances; - peer_config.epochs = Some(EpochList::new(&[ + peer_config.chain_config.initial_balances = initial_balances; + peer_config.chain_config.epochs = Some(EpochList::new(&[ StacksEpoch { epoch_id: StacksEpochId::Epoch20, start_height: 0, @@ -4316,22 +4347,29 @@ fn mempool_incorporate_pox_unlocks() { network_epoch: PEER_VERSION_EPOCH_2_1, }, ])); - peer_config.burnchain.pox_constants.v1_unlock_height = - peer_config.epochs.as_ref().unwrap()[StacksEpochId::Epoch2_05].end_height as u32 + 1; - let pox_constants = peer_config.burnchain.pox_constants.clone(); - let burnchain = peer_config.burnchain.clone(); + peer_config + .chain_config + .burnchain + .pox_constants + .v1_unlock_height = peer_config.chain_config.epochs.as_ref().unwrap() + [StacksEpochId::Epoch2_05] + .end_height as u32 + + 1; + let pox_constants = peer_config.chain_config.burnchain.pox_constants.clone(); + let burnchain = peer_config.chain_config.burnchain.clone(); let mut peer = TestPeer::new(peer_config); - let chainstate_path = peer.chainstate_path.clone(); + let chainstate_path = peer.chain.chainstate_path.clone(); let first_stacks_block_height = { - let sn = SortitionDB::get_canonical_burn_chain_tip(peer.sortdb.as_ref().unwrap().conn()) - .unwrap(); + let sn = + SortitionDB::get_canonical_burn_chain_tip(peer.chain.sortdb.as_ref().unwrap().conn()) + .unwrap(); sn.block_height }; - let first_block_height = peer.sortdb.as_ref().unwrap().first_block_height; + let first_block_height = peer.chain.sortdb.as_ref().unwrap().first_block_height; let first_pox_cycle = pox_constants .block_height_to_reward_cycle(first_block_height, first_stacks_block_height) .unwrap(); @@ -4355,8 +4393,9 @@ fn mempool_incorporate_pox_unlocks() { for tenure_id in 0..num_blocks { // send transactions 
to the mempool - let tip = SortitionDB::get_canonical_burn_chain_tip(peer.sortdb.as_ref().unwrap().conn()) - .unwrap(); + let tip = + SortitionDB::get_canonical_burn_chain_tip(peer.chain.sortdb.as_ref().unwrap().conn()) + .unwrap(); let (burn_ops, stacks_block, microblocks) = peer.make_tenure( |ref mut miner, @@ -4527,16 +4566,17 @@ fn test_fee_order_mismatch_nonce_order() { .unwrap(); let mut peer_config = TestPeerConfig::new(function_name!(), 2002, 2003); - peer_config.initial_balances = vec![(addr.to_account_principal(), 1000000000)]; - let burnchain = peer_config.burnchain.clone(); + peer_config.chain_config.initial_balances = vec![(addr.to_account_principal(), 1000000000)]; + let burnchain = peer_config.chain_config.burnchain.clone(); let mut peer = TestPeer::new(peer_config); - let chainstate_path = peer.chainstate_path.clone(); + let chainstate_path = peer.chain.chainstate_path.clone(); let first_stacks_block_height = { - let sn = SortitionDB::get_canonical_burn_chain_tip(peer.sortdb.as_ref().unwrap().conn()) - .unwrap(); + let sn = + SortitionDB::get_canonical_burn_chain_tip(peer.chain.sortdb.as_ref().unwrap().conn()) + .unwrap(); sn.block_height }; @@ -4545,8 +4585,8 @@ fn test_fee_order_mismatch_nonce_order() { let sender_nonce = 0; // send transactions to the mempool - let tip = - SortitionDB::get_canonical_burn_chain_tip(peer.sortdb.as_ref().unwrap().conn()).unwrap(); + let tip = SortitionDB::get_canonical_burn_chain_tip(peer.chain.sortdb.as_ref().unwrap().conn()) + .unwrap(); let (burn_ops, stacks_block, microblocks) = peer.make_tenure( |ref mut miner, @@ -4714,9 +4754,10 @@ fn paramaterized_mempool_walk_test( ); let mut peer_config = TestPeerConfig::new(&test_name, 2002, 2003); - peer_config.initial_balances = vec![]; + peer_config.chain_config.initial_balances = vec![]; for (privk, addr) in &key_address_pairs { peer_config + .chain_config .initial_balances .push((addr.to_account_principal(), 1000000000)); } @@ -4895,9 +4936,10 @@ fn mempool_walk_test_next_nonce_with_highest_fee_rate_strategy() { let test_name = function_name!(); let mut peer_config = TestPeerConfig::new(&test_name, 0, 0); - peer_config.initial_balances = vec![]; + peer_config.chain_config.initial_balances = vec![]; for (privk, addr) in &key_address_pairs { peer_config + .chain_config .initial_balances .push((addr.to_account_principal(), 1000000000)); } @@ -5145,15 +5187,15 @@ fn run_mempool_walk_strategy_nonce_order_test( .collect(); let mut peer_config = TestPeerConfig::new(test_name, 2030, 2031); - peer_config.initial_balances = initial_balances; - let burnchain = peer_config.burnchain.clone(); + peer_config.chain_config.initial_balances = initial_balances; + let burnchain = peer_config.chain_config.burnchain.clone(); let mut peer = TestPeer::new(peer_config); - let chainstate_path = peer.chainstate_path.clone(); + let chainstate_path = peer.chain.chainstate_path.clone(); let mut mempool = MemPoolDB::open_test(false, 0x80000000, &chainstate_path).unwrap(); - let tip = - SortitionDB::get_canonical_burn_chain_tip(peer.sortdb.as_ref().unwrap().conn()).unwrap(); + let tip = SortitionDB::get_canonical_burn_chain_tip(peer.chain.sortdb.as_ref().unwrap().conn()) + .unwrap(); let (burn_ops, stacks_block, microblocks) = peer.make_tenure( |ref mut miner, @@ -5198,7 +5240,7 @@ fn run_mempool_walk_strategy_nonce_order_test( &privk, tx_nonce, 200 * (tx_nonce + 1), // Higher nonce = higher fee - &format!("contract-{}", tx_nonce), + &format!("contract-{tx_nonce}"), contract, ) }) diff --git 
a/stackslib/src/chainstate/tests/consensus.rs b/stackslib/src/chainstate/tests/consensus.rs new file mode 100644 index 00000000000..0204e51c5c4 --- /dev/null +++ b/stackslib/src/chainstate/tests/consensus.rs @@ -0,0 +1,868 @@ +// Copyright (C) 2025 Stacks Open Internet Foundation +// +// This program is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. +// +// This program is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. +// +// You should have received a copy of the GNU General Public License +// along with this program. If not, see . +use std::collections::HashMap; + +use clarity::boot_util::boot_code_addr; +use clarity::codec::StacksMessageCodec; +use clarity::consts::{ + CHAIN_ID_TESTNET, PEER_VERSION_EPOCH_1_0, PEER_VERSION_EPOCH_2_0, PEER_VERSION_EPOCH_2_05, + PEER_VERSION_EPOCH_2_1, PEER_VERSION_EPOCH_2_2, PEER_VERSION_EPOCH_2_3, PEER_VERSION_EPOCH_2_4, + PEER_VERSION_EPOCH_2_5, PEER_VERSION_EPOCH_3_0, PEER_VERSION_EPOCH_3_1, PEER_VERSION_EPOCH_3_2, + PEER_VERSION_EPOCH_3_3, STACKS_EPOCH_MAX, +}; +use clarity::types::chainstate::{StacksAddress, StacksPrivateKey, StacksPublicKey, TrieHash}; +use clarity::types::{StacksEpoch, StacksEpochId}; +use clarity::util::hash::{MerkleTree, Sha512Trunc256Sum}; +use clarity::util::secp256k1::MessageSignature; +use clarity::vm::ast::errors::{ParseError, ParseErrorKind}; +use clarity::vm::ast::stack_depth_checker::AST_CALL_STACK_DEPTH_BUFFER; +use clarity::vm::costs::ExecutionCost; +use clarity::vm::events::StacksTransactionEvent; +use clarity::vm::types::{PrincipalData, ResponseData}; +use clarity::vm::{Value as ClarityValue, MAX_CALL_STACK_DEPTH}; +use serde::{Deserialize, Serialize}; +use stacks_common::bitvec::BitVec; + +use crate::burnchains::PoxConstants; +use crate::chainstate::burn::db::sortdb::SortitionDB; +use crate::chainstate::nakamoto::{NakamotoBlock, NakamotoBlockHeader, NakamotoChainState}; +use crate::chainstate::stacks::boot::{RewardSet, RewardSetData}; +use crate::chainstate::stacks::db::StacksEpochReceipt; +use crate::chainstate::stacks::{Error as ChainstateError, StacksTransaction, TenureChangeCause}; +use crate::chainstate::tests::TestChainstate; +use crate::clarity_vm::clarity::{ClarityError, PreCommitClarityBlock}; +use crate::core::test_util::{make_contract_publish, make_stacks_transfer_tx}; +use crate::core::{EpochList, BLOCK_LIMIT_MAINNET_21}; +use crate::net::tests::NakamotoBootPlan; +pub const SK_1: &str = "a1289f6438855da7decf9b61b852c882c398cff1446b2a0f823538aa2ebef92e01"; +pub const SK_2: &str = "4ce9a8f7539ea93753a36405b16e8b57e15a552430410709c2b6d65dca5c02e201"; +pub const SK_3: &str = "cb95ddd0fe18ec57f4f3533b95ae564b3f1ae063dbf75b46334bd86245aef78501"; + +fn epoch_3_0_onwards(first_burnchain_height: u64) -> EpochList { + info!("StacksEpoch unit_test first_burn_height = {first_burnchain_height}"); + + EpochList::new(&[ + StacksEpoch { + epoch_id: StacksEpochId::Epoch10, + start_height: 0, + end_height: 0, + block_limit: ExecutionCost::max_value(), + network_epoch: PEER_VERSION_EPOCH_1_0, + }, + StacksEpoch { + epoch_id: StacksEpochId::Epoch20, + start_height: 0, + end_height: 0, + block_limit: ExecutionCost::max_value(), + network_epoch: 
PEER_VERSION_EPOCH_2_0, + }, + StacksEpoch { + epoch_id: StacksEpochId::Epoch2_05, + start_height: 0, + end_height: 0, + block_limit: ExecutionCost::max_value(), + network_epoch: PEER_VERSION_EPOCH_2_05, + }, + StacksEpoch { + epoch_id: StacksEpochId::Epoch21, + start_height: 0, + end_height: 0, + block_limit: ExecutionCost::max_value(), + network_epoch: PEER_VERSION_EPOCH_2_1, + }, + StacksEpoch { + epoch_id: StacksEpochId::Epoch22, + start_height: 0, + end_height: 0, + block_limit: ExecutionCost::max_value(), + network_epoch: PEER_VERSION_EPOCH_2_2, + }, + StacksEpoch { + epoch_id: StacksEpochId::Epoch23, + start_height: 0, + end_height: 0, + block_limit: ExecutionCost::max_value(), + network_epoch: PEER_VERSION_EPOCH_2_3, + }, + StacksEpoch { + epoch_id: StacksEpochId::Epoch24, + start_height: 0, + end_height: 0, + block_limit: ExecutionCost::max_value(), + network_epoch: PEER_VERSION_EPOCH_2_4, + }, + StacksEpoch { + epoch_id: StacksEpochId::Epoch25, + start_height: 0, + end_height: first_burnchain_height, + block_limit: BLOCK_LIMIT_MAINNET_21.clone(), + network_epoch: PEER_VERSION_EPOCH_2_5, + }, + StacksEpoch { + epoch_id: StacksEpochId::Epoch30, + start_height: first_burnchain_height, + end_height: first_burnchain_height + 1, + block_limit: BLOCK_LIMIT_MAINNET_21.clone(), + network_epoch: PEER_VERSION_EPOCH_3_0, + }, + StacksEpoch { + epoch_id: StacksEpochId::Epoch31, + start_height: first_burnchain_height + 1, + end_height: first_burnchain_height + 2, + block_limit: BLOCK_LIMIT_MAINNET_21.clone(), + network_epoch: PEER_VERSION_EPOCH_3_1, + }, + StacksEpoch { + epoch_id: StacksEpochId::Epoch32, + start_height: first_burnchain_height + 2, + end_height: first_burnchain_height + 3, + block_limit: BLOCK_LIMIT_MAINNET_21.clone(), + network_epoch: PEER_VERSION_EPOCH_3_2, + }, + StacksEpoch { + epoch_id: StacksEpochId::Epoch33, + start_height: first_burnchain_height + 3, + end_height: STACKS_EPOCH_MAX, + block_limit: BLOCK_LIMIT_MAINNET_21.clone(), + network_epoch: PEER_VERSION_EPOCH_3_3, + }, + ]) +} + +/// Represents the expected output of a transaction in a test. +#[derive(Serialize, Deserialize, Clone, Debug, PartialEq)] +pub struct ExpectedTransactionOutput { + /// The expected return value of the transaction. + pub return_type: ClarityValue, + /// The expected execution cost of the transaction. + pub cost: ExecutionCost, +} + +/// Represents the expected outputs for a block's execution. +#[derive(Serialize, Deserialize, Clone, Debug, PartialEq)] +pub struct ExpectedBlockOutput { + /// The expected outputs for each transaction, in input order. + pub transactions: Vec, + /// The total execution cost of the block. + pub total_block_cost: ExecutionCost, +} + +/// Represents the expected result of a consensus test. +#[derive(Serialize, Deserialize, Clone, Debug, PartialEq)] +pub enum ExpectedResult { + /// The test should succeed with the specified outputs. + Success(ExpectedBlockOutput), + /// The test should fail with an error matching the specified string + /// Cannot match on the exact Error directly as they do not implement + /// Serialize/Deserialize or PartialEq + Failure(String), +} + +/// Represents a block to be appended in a test and its expected result. +#[derive(Serialize, Deserialize, Clone, Debug, PartialEq)] +pub struct TestBlock { + /// Hex representation of the MARF hash for block construction. + pub marf_hash: String, + /// Transactions to include in the block + pub transactions: Vec, + /// The expected result after appending the constructed block. 
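To make the three expected-output types above concrete, here is a minimal sketch (not part of the diff) of how a passing single-transaction block could be described; the ok-true return value and zero costs are placeholders, not values taken from the vectors later in this file:

// Sketch only: assumes the ExpectedTransactionOutput / ExpectedBlockOutput / ExpectedResult
// definitions above and the ClarityValue / ResponseData / ExecutionCost imports at the top of this file.
let ok_true = ClarityValue::Response(ResponseData {
    committed: true,
    data: Box::new(ClarityValue::Bool(true)),
});
let expected = ExpectedResult::Success(ExpectedBlockOutput {
    transactions: vec![ExpectedTransactionOutput {
        return_type: ok_true,
        cost: ExecutionCost::ZERO,
    }],
    total_block_cost: ExecutionCost::ZERO,
});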
+ pub expected_result: ExpectedResult, +} + +/// Defines a test vector for a consensus test, including chainstate setup and expected outcomes. +#[derive(Serialize, Deserialize, Clone, Debug, PartialEq)] +pub struct ConsensusTestVector { + /// Initial balances for the provided PrincipalData during chainstate instantiation. + pub initial_balances: Vec<(PrincipalData, u64)>, + /// A mapping of epoch to the blocks that should be applied in that epoch. + pub epoch_blocks: HashMap<StacksEpochId, Vec<TestBlock>>, +} + +/// Tracks mismatches between actual and expected transaction results. +#[derive(Debug, Clone, PartialEq, Serialize, Deserialize)] +pub struct TransactionMismatch { + /// The index of the transaction with mismatches. + pub index: u32, + /// Mismatch between actual and expected return types, if any. + pub return_type: Option<(ClarityValue, ClarityValue)>, + /// Mismatch between actual and expected execution costs, if any. + pub cost: Option<(ExecutionCost, ExecutionCost)>, +} + +impl TransactionMismatch { + /// Creates a new `TransactionMismatch` for the given transaction index. + fn new(index: u32) -> Self { + Self { + index, + return_type: None, + cost: None, + } + } + + /// Adds a return type mismatch to the transaction. + fn with_return_type_mismatch(mut self, actual: ClarityValue, expected: ClarityValue) -> Self { + self.return_type = Some((actual, expected)); + self + } + + /// Adds an execution cost mismatch to the transaction. + fn with_cost_mismatch(mut self, actual: ExecutionCost, expected: ExecutionCost) -> Self { + self.cost = Some((actual, expected)); + self + } + + /// Returns true if no mismatches are recorded. + fn is_empty(&self) -> bool { + self.return_type.is_none() && self.cost.is_none() + } +} + +/// Aggregates all mismatches between actual and expected test results. +#[derive(Debug, Clone, PartialEq, Default, Serialize, Deserialize)] +pub struct ConsensusMismatch { + /// Mismatches for individual transactions. + pub transactions: Vec<TransactionMismatch>, + /// Mismatch between actual and expected total block costs, if any. + pub total_block_cost: Option<(ExecutionCost, ExecutionCost)>, + /// Mismatch between actual and expected error messages, if any. + pub error: Option<(String, String)>, +} + +impl ConsensusMismatch { + /// Creates a `ConsensusMismatch` from test results, if mismatches exist. + pub fn from_test_result<'a>( + append_result: Result< + ( + StacksEpochReceipt, + PreCommitClarityBlock<'a>, + Option<RewardSetData>, + Vec<StacksTransactionEvent>, + ), + ChainstateError, + >, + expected_result: ExpectedResult, + ) -> Option<Self> { + let mut mismatches = ConsensusMismatch::default(); + match (append_result, expected_result) { + (Ok((epoch_receipt, clarity_commit, _, _)), ExpectedResult::Success(expected)) => { + // Convert transaction receipts to `ExpectedTransactionOutput` for comparison. + let actual_transactions: Vec<_> = epoch_receipt + .tx_receipts + .iter() + .map(|r| { + ( + r.tx_index, + ExpectedTransactionOutput { + return_type: r.result.clone(), + cost: r.execution_cost.clone(), + }, + ) + }) + .collect(); + + // Compare each transaction's actual vs expected outputs.
+ for ((tx_index, actual_tx), expected_tx) in + actual_transactions.iter().zip(expected.transactions.iter()) + { + let mut tx_mismatch = TransactionMismatch::new(*tx_index); + let mut has_mismatch = false; + + if actual_tx.return_type != expected_tx.return_type { + tx_mismatch = tx_mismatch.with_return_type_mismatch( + actual_tx.return_type.clone(), + expected_tx.return_type.clone(), + ); + has_mismatch = true; + } + + if actual_tx.cost != expected_tx.cost { + tx_mismatch = tx_mismatch + .with_cost_mismatch(actual_tx.cost.clone(), expected_tx.cost.clone()); + has_mismatch = true; + } + + if has_mismatch { + mismatches.add_transaction_mismatch(tx_mismatch); + } + } + + // Compare total block execution cost. + if epoch_receipt.anchored_block_cost != expected.total_block_cost { + mismatches.add_total_block_cost_mismatch( + &epoch_receipt.anchored_block_cost, + &expected.total_block_cost, + ); + } + // TODO: add any additional mismatches we might care about? + clarity_commit.commit(); + } + (Ok(_), ExpectedResult::Failure(expected_err)) => { + mismatches.error = Some(("Ok".to_string(), expected_err)); + } + (Err(actual_err), ExpectedResult::Failure(expected_err)) => { + let actual_err_str = actual_err.to_string(); + if actual_err_str != expected_err { + mismatches.error = Some((actual_err_str, expected_err)); + } + } + (Err(actual_err), ExpectedResult::Success(_)) => { + mismatches.error = Some((actual_err.to_string(), "Success".into())); + } + } + + if mismatches.is_empty() { + None + } else { + Some(mismatches) + } + } + + /// Adds a transaction mismatch to the collection. + fn add_transaction_mismatch(&mut self, mismatch: TransactionMismatch) { + self.transactions.push(mismatch); + } + + /// Records a total block cost mismatch. + fn add_total_block_cost_mismatch(&mut self, actual: &ExecutionCost, expected: &ExecutionCost) { + self.total_block_cost = Some((actual.clone(), expected.clone())); + } + + /// Returns true if no mismatches are recorded. + pub fn is_empty(&self) -> bool { + self.transactions.is_empty() && self.total_block_cost.is_none() && self.error.is_none() + } + + /// Serializes the given `ConsensusMismatch` as pretty-printed JSON, + /// or returns an empty string if `None`. + pub fn to_json_string_pretty(mismatch: &Option) -> String { + mismatch + .as_ref() + .map(|m| serde_json::to_string_pretty(m).unwrap()) + .unwrap_or("".into()) + } +} + +/// Represents a consensus test with chainstate and test vector. +pub struct ConsensusTest<'a> { + pub chain: TestChainstate<'a>, + pub test_vector: ConsensusTestVector, +} + +impl ConsensusTest<'_> { + /// Creates a new `ConsensusTest` with the given test name and vector. + pub fn new(test_name: &str, test_vector: ConsensusTestVector) -> Self { + // Validate blocks + for (epoch_id, blocks) in &test_vector.epoch_blocks { + assert!( + !matches!( + *epoch_id, + StacksEpochId::Epoch10 + | StacksEpochId::Epoch20 + | StacksEpochId::Epoch2_05 + | StacksEpochId::Epoch21 + | StacksEpochId::Epoch22 + | StacksEpochId::Epoch23 + | StacksEpochId::Epoch24 + | StacksEpochId::Epoch25 + ), + "Pre-Nakamoto Tenures are not Supported" + ); + for block in blocks { + if let ExpectedResult::Success(output) = &block.expected_result { + assert_eq!( + output.transactions.len(), + block.transactions.len(), + "Test block is invalid. 
Must specify an expected output per input transaction" + ); + } + } + } + + let privk = StacksPrivateKey::from_hex( + "510f96a8efd0b11e211733c1ac5e3fa6f3d3fcdd62869e376c47decb3e14fea101", + ) + .unwrap(); + + // Set up chainstate to start at Epoch 3.0 + // We don't really ever want the reward cycle to force a new signer set... + // so for now just set the cycle length to a high value (100) + let mut boot_plan = NakamotoBootPlan::new(test_name) + .with_pox_constants(100, 3) + .with_initial_balances(test_vector.initial_balances.clone()) + .with_private_key(privk); + let epochs = epoch_3_0_onwards( + (boot_plan.pox_constants.pox_4_activation_height + + boot_plan.pox_constants.reward_cycle_length + + 1) as u64, + ); + boot_plan = boot_plan.with_epochs(epochs); + let chain = boot_plan.boot_nakamoto_chainstate(None); + + Self { chain, test_vector } + } + + /// Advances the chainstate to the specified epoch. Creating a tenure change block per burn block height + fn advance_to_epoch(&mut self, target_epoch: StacksEpochId) { + let burn_block_height = self.chain.get_burn_block_height(); + let mut current_epoch = + SortitionDB::get_stacks_epoch(self.chain.sortdb().conn(), burn_block_height) + .unwrap() + .unwrap() + .epoch_id; + assert!(current_epoch <= target_epoch, "Chainstate is already at a higher epoch than the target. Current epoch: {current_epoch}. Target epoch: {target_epoch}"); + while current_epoch < target_epoch { + let (burn_ops, mut tenure_change, miner_key) = self + .chain + .begin_nakamoto_tenure(TenureChangeCause::BlockFound); + let (_, header_hash, consensus_hash) = self.chain.next_burnchain_block(burn_ops); + let vrf_proof = self.chain.make_nakamoto_vrf_proof(miner_key); + + tenure_change.tenure_consensus_hash = consensus_hash.clone(); + tenure_change.burn_view_consensus_hash = consensus_hash.clone(); + let tenure_change_tx = self.chain.miner.make_nakamoto_tenure_change(tenure_change); + let coinbase_tx = self.chain.miner.make_nakamoto_coinbase(None, vrf_proof); + + let _blocks_and_sizes = + self.chain + .make_nakamoto_tenure(tenure_change_tx, coinbase_tx, Some(0)); + let burn_block_height = self.chain.get_burn_block_height(); + current_epoch = + SortitionDB::get_stacks_epoch(self.chain.sortdb().conn(), burn_block_height) + .unwrap() + .unwrap() + .epoch_id; + } + } + + /// Runs the consensus test for the test vector, advancing epochs as needed. 
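The `run` method that follows consumes the struct above, advances to each requested epoch, and appends the described blocks. A caller-level sketch of how the pieces fit together (the test name and the all-zero MARF hash are placeholders; a real vector must carry the state root the block actually produces, as the tests at the end of this file do):

// Hypothetical usage; the name and MARF hash below are placeholders only.
let mut epoch_blocks = HashMap::new();
epoch_blocks.insert(
    StacksEpochId::Epoch30,
    vec![TestBlock {
        marf_hash: "0000000000000000000000000000000000000000000000000000000000000000".into(),
        transactions: vec![],
        expected_result: ExpectedResult::Success(ExpectedBlockOutput {
            transactions: vec![],
            total_block_cost: ExecutionCost::ZERO,
        }),
    }],
);
ConsensusTest::new(
    "my_consensus_test",
    ConsensusTestVector {
        initial_balances: vec![],
        epoch_blocks,
    },
)
.run();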
+ pub fn run(mut self) { + // Get sorted epochs + let mut epochs: Vec = + self.test_vector.epoch_blocks.keys().cloned().collect(); + epochs.sort(); + + for epoch in epochs { + debug!( + "--------- Processing epoch {epoch:?} with {} blocks ---------", + self.test_vector.epoch_blocks[&epoch].len() + ); + self.advance_to_epoch(epoch); + for (i, block) in self.test_vector.epoch_blocks[&epoch].iter().enumerate() { + debug!("--------- Running block {i} for epoch {epoch:?} ---------"); + let (nakamoto_block, block_size) = + self.construct_nakamoto_block(&block.marf_hash, &block.transactions); + let sortdb = self.chain.sortdb.take().unwrap(); + let chain_tip = NakamotoChainState::get_canonical_block_header( + self.chain.stacks_node().chainstate.db(), + &sortdb, + ) + .unwrap() + .unwrap(); + let pox_constants = PoxConstants::test_default(); + + debug!( + "--------- Appending block {} ---------", + nakamoto_block.header.signer_signature_hash(); + "block" => ?nakamoto_block + ); + { + let (mut chainstate_tx, clarity_instance) = self + .chain + .stacks_node() + .chainstate + .chainstate_tx_begin() + .unwrap(); + + let mut burndb_conn = sortdb.index_handle_at_tip(); + + let result = NakamotoChainState::append_block( + &mut chainstate_tx, + clarity_instance, + &mut burndb_conn, + &chain_tip.consensus_hash, + &pox_constants, + &chain_tip, + &chain_tip.burn_header_hash, + chain_tip.burn_header_height, + chain_tip.burn_header_timestamp, + &nakamoto_block, + block_size.try_into().unwrap(), + nakamoto_block.header.burn_spent, + 1500, + &RewardSet::empty(), + false, + ); + + debug!("--------- Appended block: {} ---------", result.is_ok()); + + // Compare actual vs expected results. + let mismatches = + ConsensusMismatch::from_test_result(result, block.expected_result.clone()); + assert!( + mismatches.is_none(), + "Mismatches found in block {i} for epoch {epoch:?}: {}", + ConsensusMismatch::to_json_string_pretty(&mismatches) + ); + chainstate_tx.commit().unwrap(); + } + + // Restore chainstate for the next block + self.chain.sortdb = Some(sortdb); + } + } + } + + /// Constructs a Nakamoto block with the given transactions and state index root. 
+ fn construct_nakamoto_block( + &self, + marf_hash: &str, + transactions: &[StacksTransaction], + ) -> (NakamotoBlock, usize) { + let state_index_root = TrieHash::from_hex(marf_hash).unwrap(); + let chain_tip = NakamotoChainState::get_canonical_block_header( + self.chain.stacks_node.as_ref().unwrap().chainstate.db(), + self.chain.sortdb.as_ref().unwrap(), + ) + .unwrap() + .unwrap(); + let cycle = self.chain.get_reward_cycle(); + let burn_spent = SortitionDB::get_block_snapshot_consensus( + self.chain.sortdb_ref().conn(), + &chain_tip.consensus_hash, + ) + .unwrap() + .map(|sn| sn.total_burn) + .unwrap(); + let mut block = NakamotoBlock { + header: NakamotoBlockHeader { + version: 1, + chain_length: chain_tip.stacks_block_height + 1, + burn_spent, + consensus_hash: chain_tip.consensus_hash.clone(), + parent_block_id: chain_tip.index_block_hash(), + tx_merkle_root: Sha512Trunc256Sum::from_data(&[]), + state_index_root, + timestamp: 1, + miner_signature: MessageSignature::empty(), + signer_signature: vec![], + pox_treatment: BitVec::ones(1).unwrap(), + }, + txs: transactions.to_vec(), + }; + + let tx_merkle_root = { + let txid_vecs: Vec<_> = block + .txs + .iter() + .map(|tx| tx.txid().as_bytes().to_vec()) + .collect(); + MerkleTree::::new(&txid_vecs).root() + }; + + block.header.tx_merkle_root = tx_merkle_root; + self.chain.miner.sign_nakamoto_block(&mut block); + let mut signers = self.chain.config.test_signers.clone().unwrap_or_default(); + signers.sign_nakamoto_block(&mut block, cycle); + let block_len = block.serialize_to_vec().len(); + (block, block_len) + } +} + +#[test] +fn test_append_empty_blocks() { + let mut epoch_blocks = HashMap::new(); + let expected_result = ExpectedResult::Success(ExpectedBlockOutput { + transactions: vec![], + total_block_cost: ExecutionCost::ZERO, + }); + epoch_blocks.insert( + StacksEpochId::Epoch30, + vec![TestBlock { + marf_hash: "f1934080b22ef0192cfb39710690e7cb0efa9cff950832b33544bde3aa1484a5".into(), + transactions: vec![], + expected_result: expected_result.clone(), + }], + ); + epoch_blocks.insert( + StacksEpochId::Epoch31, + vec![TestBlock { + marf_hash: "a05f1383613215f5789eb977e4c62dfbb789d90964e14865d109375f7f6dc3cf".into(), + transactions: vec![], + expected_result: expected_result.clone(), + }], + ); + epoch_blocks.insert( + StacksEpochId::Epoch32, + vec![TestBlock { + marf_hash: "c17829daff8746329c65ae658f4087519c6a8bd8c7f21e51644ddbc9c010390f".into(), + transactions: vec![], + expected_result: expected_result.clone(), + }], + ); + epoch_blocks.insert( + StacksEpochId::Epoch33, + vec![TestBlock { + marf_hash: "23ecbcb91cac914ba3994a15f3ea7189bcab4e9762530cd0e6c7d237fcd6dc78".into(), + transactions: vec![], + expected_result: expected_result.clone(), + }], + ); + + let test_vector = ConsensusTestVector { + initial_balances: Vec::new(), + epoch_blocks, + }; + ConsensusTest::new(function_name!(), test_vector).run(); +} + +#[test] +fn test_append_state_index_root_mismatches() { + let mut epoch_blocks = HashMap::new(); + epoch_blocks.insert( + StacksEpochId::Epoch30, + vec![TestBlock { + marf_hash: "0000000000000000000000000000000000000000000000000000000000000000".into(), + transactions: vec![], + expected_result: ExpectedResult::Failure( + ChainstateError::InvalidStacksBlock( + "Block ef45bfa44231d9e7aff094b53cfd48df0456067312f169a499354c4273a66fe3 state root mismatch: expected 0000000000000000000000000000000000000000000000000000000000000000, got f1934080b22ef0192cfb39710690e7cb0efa9cff950832b33544bde3aa1484a5".into(), + ) + .to_string(), + ), 
+ }], + ); + epoch_blocks.insert( + StacksEpochId::Epoch31, + vec![TestBlock { + marf_hash: "0000000000000000000000000000000000000000000000000000000000000000".into(), + transactions: vec![], + expected_result: ExpectedResult::Failure( + ChainstateError::InvalidStacksBlock( + "Block a14d0b5c8d3c49554aeb462a8fe019718195789fa1dcd642059b75e41f0ce9cc state root mismatch: expected 0000000000000000000000000000000000000000000000000000000000000000, got a05f1383613215f5789eb977e4c62dfbb789d90964e14865d109375f7f6dc3cf".into(), + ) + .to_string(), + ), + }], + ); + epoch_blocks.insert( + StacksEpochId::Epoch32, + vec![TestBlock { + marf_hash: "0000000000000000000000000000000000000000000000000000000000000000".into(), + transactions: vec![], + expected_result: ExpectedResult::Failure( + ChainstateError::InvalidStacksBlock( + "Block f8120b4a632ee1d49fbbde3e01289588389cd205cab459a4493a7d58d2dc18ed state root mismatch: expected 0000000000000000000000000000000000000000000000000000000000000000, got c17829daff8746329c65ae658f4087519c6a8bd8c7f21e51644ddbc9c010390f".into(), + ) + .to_string(), + ), + }], + ); + epoch_blocks.insert( + StacksEpochId::Epoch33, + vec![TestBlock { + marf_hash: "0000000000000000000000000000000000000000000000000000000000000000".into(), + transactions: vec![], + expected_result: ExpectedResult::Failure( + ChainstateError::InvalidStacksBlock( + "Block 4dcb48b684d105ff0e0ab8becddd4a2d5623cc8b168aacf9c455e20b3e610e63 state root mismatch: expected 0000000000000000000000000000000000000000000000000000000000000000, got 23ecbcb91cac914ba3994a15f3ea7189bcab4e9762530cd0e6c7d237fcd6dc78".into(), + ) + .to_string(), + ), + }], + ); + + let test_vector = ConsensusTestVector { + initial_balances: Vec::new(), + epoch_blocks, + }; + ConsensusTest::new(function_name!(), test_vector).run(); +} + +#[test] +fn test_append_stx_transfers_success() { + let sender_privks = [ + StacksPrivateKey::from_hex(SK_1).unwrap(), + StacksPrivateKey::from_hex(SK_2).unwrap(), + StacksPrivateKey::from_hex(SK_3).unwrap(), + ]; + let send_amount = 1_000; + let tx_fee = 180; + let mut initial_balances = Vec::new(); + let transactions: Vec<_> = sender_privks + .iter() + .map(|sender_privk| { + initial_balances.push(( + StacksAddress::p2pkh(false, &StacksPublicKey::from_private(sender_privk)).into(), + send_amount + tx_fee, + )); + // Interestingly, it doesn't seem to care about nonce... 
+ make_stacks_transfer_tx( + sender_privk, + 0, + tx_fee, + CHAIN_ID_TESTNET, + &boot_code_addr(false).into(), + send_amount, + ) + }) + .collect(); + let transfer_result = ExpectedTransactionOutput { + return_type: ClarityValue::Response(ResponseData { + committed: true, + data: Box::new(ClarityValue::Bool(true)), + }), + cost: ExecutionCost { + write_length: 0, + write_count: 0, + read_length: 0, + read_count: 0, + runtime: 0, + }, + }; + let outputs = ExpectedBlockOutput { + transactions: vec![ + transfer_result.clone(), + transfer_result.clone(), + transfer_result, + ], + total_block_cost: ExecutionCost::ZERO, + }; + let mut epoch_blocks = HashMap::new(); + epoch_blocks.insert( + StacksEpochId::Epoch30, + vec![TestBlock { + marf_hash: "63ea49669d2216ebc7e4f8b5e1cd2c99b8aff9806794adf87dcf709c0a244798".into(), + transactions: transactions.clone(), + expected_result: ExpectedResult::Success(outputs.clone()), + }], + ); + epoch_blocks.insert( + StacksEpochId::Epoch31, + vec![TestBlock { + marf_hash: "7fc538e605a4a353871c4a655ae850fe9a70c3875b65f2bb42ea3bef5effed2c".into(), + transactions: transactions.clone(), + expected_result: ExpectedResult::Success(outputs.clone()), + }], + ); + epoch_blocks.insert( + StacksEpochId::Epoch32, + vec![TestBlock { + marf_hash: "4d5c9a6d07806ac5006137de22b083de66fff7119143dd5cd92e4a457d66e028".into(), + transactions: transactions.clone(), + expected_result: ExpectedResult::Success(outputs.clone()), + }], + ); + epoch_blocks.insert( + StacksEpochId::Epoch33, + vec![TestBlock { + marf_hash: "66eed8c0ab31db111a5adcc83d38a7004c6e464e3b9fb9f52ec589bc6d5f2d32".into(), + transactions: transactions.clone(), + expected_result: ExpectedResult::Success(outputs.clone()), + }], + ); + + let test_vector = ConsensusTestVector { + initial_balances, + epoch_blocks, + }; + ConsensusTest::new(function_name!(), test_vector).run(); +} + +#[test] +fn test_append_chainstate_error_expression_stack_depth_too_deep() { + let sender_privk = StacksPrivateKey::from_hex(SK_1).unwrap(); + let exceeds_repeat_factor = AST_CALL_STACK_DEPTH_BUFFER + (MAX_CALL_STACK_DEPTH as u64); + let tx_exceeds_body_start = "{ a : ".repeat(exceeds_repeat_factor as usize); + let tx_exceeds_body_end = "} ".repeat(exceeds_repeat_factor as usize); + let tx_exceeds_body = format!("{tx_exceeds_body_start}u1 {tx_exceeds_body_end}"); + + let tx_fee = (tx_exceeds_body.len() * 100) as u64; + let tx_bytes = make_contract_publish( + &sender_privk, + 0, + tx_fee, + CHAIN_ID_TESTNET, + "test-exceeds", + &tx_exceeds_body, + ); + + let tx = StacksTransaction::consensus_deserialize(&mut &tx_bytes[..]).unwrap(); + let initial_balances = vec![( + StacksAddress::p2pkh(false, &StacksPublicKey::from_private(&sender_privk)).into(), + tx_fee, + )]; + let e = ChainstateError::ClarityError(ClarityError::Parse(ParseError::new( + ParseErrorKind::ExpressionStackDepthTooDeep, + ))); + let mut epoch_blocks = HashMap::new(); + epoch_blocks.insert( + StacksEpochId::Epoch30, + vec![TestBlock { + marf_hash: "0000000000000000000000000000000000000000000000000000000000000000".into(), + transactions: vec![tx.clone()], + expected_result: ExpectedResult::Failure( + ChainstateError::InvalidStacksBlock(format!( + "Invalid Stacks block ff0796f9934d45aad71871f317061acb99dd5ef1237a8747a78624a2824f7d32: {e:?}" + )) + .to_string(), + ), + }], + ); + epoch_blocks.insert( + StacksEpochId::Epoch31, + vec![TestBlock { + marf_hash: "0000000000000000000000000000000000000000000000000000000000000000".into(), + transactions: vec![tx.clone()], + expected_result: 
ExpectedResult::Failure( + ChainstateError::InvalidStacksBlock(format!( + "Invalid Stacks block 9da03cdc774989cea30445f1453073b070430867edcecb180d1cc9a6e9738b46: {e:?}" + )) + .to_string(), + ), + }], + ); + epoch_blocks.insert( + StacksEpochId::Epoch32, + vec![TestBlock { + marf_hash: "0000000000000000000000000000000000000000000000000000000000000000".into(), + transactions: vec![tx.clone()], + expected_result: ExpectedResult::Failure( + ChainstateError::InvalidStacksBlock(format!( + "Invalid Stacks block 76a6d95b3ec273a13f10080b3b18e225cc838044c5e3a3000b7ccdd8b50a5ae1: {e:?}" + )) + .to_string(), + ), + }], + ); + epoch_blocks.insert( + StacksEpochId::Epoch33, + vec![TestBlock { + marf_hash: "0000000000000000000000000000000000000000000000000000000000000000".into(), + transactions: vec![tx.clone()], + expected_result: ExpectedResult::Failure( + ChainstateError::InvalidStacksBlock(format!( + "Invalid Stacks block de3c507ab60e717275f97f267ec2608c96aaab42a7e32fc2d8129585dff9e74a: {e:?}" + )) + .to_string(), + ), + }], + ); + + let test_vector = ConsensusTestVector { + initial_balances, + epoch_blocks, + }; + ConsensusTest::new(function_name!(), test_vector).run(); +} diff --git a/stackslib/src/chainstate/tests/mod.rs b/stackslib/src/chainstate/tests/mod.rs new file mode 100644 index 00000000000..1ed1394971a --- /dev/null +++ b/stackslib/src/chainstate/tests/mod.rs @@ -0,0 +1,1521 @@ +// Copyright (C) 2025 Stacks Open Internet Foundation +// +// This program is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. +// +// This program is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. +// +// You should have received a copy of the GNU General Public License +// along with this program. If not, see . 
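As a reading aid for the failure vectors in the consensus tests above: the expected string is simply the `Display` form of the chainstate error, and it embeds a hash identifying the exact block the harness constructs for that epoch, which appears to be why each epoch entry carries a different hash. A sketch reusing the Epoch 3.0 values from `test_append_state_index_root_mismatches` above:

// Illustrative reconstruction of one expected failure string (values copied from the test above).
let block_hash = "ef45bfa44231d9e7aff094b53cfd48df0456067312f169a499354c4273a66fe3";
let claimed = "0000000000000000000000000000000000000000000000000000000000000000";
let actual = "f1934080b22ef0192cfb39710690e7cb0efa9cff950832b33544bde3aa1484a5";
let expected = ExpectedResult::Failure(
    ChainstateError::InvalidStacksBlock(format!(
        "Block {block_hash} state root mismatch: expected {claimed}, got {actual}"
    ))
    .to_string(),
);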
+pub mod consensus; + +use std::fs; + +use clarity::types::chainstate::{ + BlockHeaderHash, BurnchainHeaderHash, StacksAddress, StacksBlockId, +}; +use clarity::vm::ast::parser::v1::CONTRACT_MAX_NAME_LENGTH; +use clarity::vm::costs::ExecutionCost; +use clarity::vm::database::STXBalance; +use clarity::vm::types::*; +use clarity::vm::ContractName; +use rand; +use rand::{thread_rng, Rng}; +use stacks_common::address::*; +use stacks_common::deps_common::bitcoin::network::serialize::BitcoinHash; +use stacks_common::types::StacksEpochId; +use stacks_common::util::hash::*; +use stacks_common::util::vrf::*; + +use self::nakamoto::test_signers::TestSigners; +use super::*; +use crate::burnchains::bitcoin::indexer::BitcoinIndexer; +use crate::burnchains::bitcoin::spv::BITCOIN_GENESIS_BLOCK_HASH_REGTEST; +use crate::burnchains::db::{BurnchainDB, BurnchainHeaderReader}; +use crate::burnchains::tests::*; +use crate::burnchains::*; +use crate::chainstate::burn::db::sortdb::*; +use crate::chainstate::burn::operations::*; +use crate::chainstate::burn::*; +use crate::chainstate::coordinator::tests::*; +use crate::chainstate::coordinator::{Error as CoordinatorError, *}; +use crate::chainstate::nakamoto::coordinator::get_nakamoto_next_recipients; +use crate::chainstate::nakamoto::tests::get_account; +use crate::chainstate::nakamoto::tests::node::{get_nakamoto_parent, TestStacker}; +use crate::chainstate::nakamoto::{NakamotoBlock, NakamotoChainState, StacksDBIndexed}; +use crate::chainstate::stacks::address::PoxAddress; +use crate::chainstate::stacks::boot::test::{get_parent_tip, make_pox_4_lockup_chain_id}; +use crate::chainstate::stacks::db::{StacksChainState, *}; +use crate::chainstate::stacks::tests::*; +use crate::chainstate::stacks::{Error as ChainstateError, StacksMicroblockHeader, *}; +use crate::core::{EpochList, StacksEpoch, StacksEpochExtension, BOOT_BLOCK_HASH}; +use crate::net::relay::Relayer; +use crate::net::test::TestEventObserver; +use crate::util_lib::boot::{boot_code_test_addr, boot_code_tx_auth}; +use crate::util_lib::signed_structured_data::pox4::{ + make_pox_4_signer_key_signature, Pox4SignatureTopic, +}; +use crate::util_lib::strings::*; + +// describes a chainstate's initial configuration +#[derive(Debug, Clone)] +pub struct TestChainstateConfig { + pub network_id: u32, + pub current_block: u64, + pub burnchain: Burnchain, + pub test_name: String, + pub initial_balances: Vec<(PrincipalData, u64)>, + pub initial_lockups: Vec, + pub spending_account: TestMiner, + pub setup_code: String, + pub epochs: Option, + pub test_stackers: Option>, + pub test_signers: Option, + /// aggregate public key to use + /// (NOTE: will be used post-Nakamoto) + pub aggregate_public_key: Option>, + pub txindex: bool, +} + +impl Default for TestChainstateConfig { + fn default() -> Self { + let mut burnchain = Burnchain::default_unittest( + 0, + &BurnchainHeaderHash::from_hex(BITCOIN_GENESIS_BLOCK_HASH_REGTEST).unwrap(), + ); + + burnchain.pox_constants = PoxConstants::test_20_no_sunset(); + let mut spending_account = TestMinerFactory::new().next_miner( + burnchain.clone(), + 1, + 1, + AddressHashMode::SerializeP2PKH, + ); + spending_account.test_with_tx_fees = false; // manually set transaction fees + + Self { + network_id: 0x80000000, + current_block: (burnchain.consensus_hash_lifetime + 1) as u64, + burnchain, + test_name: "".into(), + initial_balances: vec![], + initial_lockups: vec![], + spending_account, + setup_code: "".into(), + epochs: None, + aggregate_public_key: None, + test_stackers: None, + 
test_signers: None, + txindex: false, + } + } +} + +impl TestChainstateConfig { + pub fn new(test_name: &str) -> Self { + Self { + test_name: test_name.into(), + ..Self::default() + } + } +} + +pub struct TestChainstate<'a> { + pub config: TestChainstateConfig, + pub sortdb: Option, + pub miner: TestMiner, + pub stacks_node: Option, + pub indexer: Option, + pub coord: ChainsCoordinator< + 'a, + TestEventObserver, + (), + OnChainRewardSetProvider<'a, TestEventObserver>, + (), + (), + BitcoinIndexer, + >, + pub nakamoto_parent_tenure_opt: Option>, + /// list of malleablized blocks produced when mining. + pub malleablized_blocks: Vec, + pub mine_malleablized_blocks: bool, + pub test_path: String, + pub chainstate_path: String, +} + +impl<'a> TestChainstate<'a> { + pub fn new(config: TestChainstateConfig) -> TestChainstate<'a> { + Self::new_with_observer(config, None) + } + + pub fn test_path(config: &TestChainstateConfig) -> String { + let random = thread_rng().gen::(); + let random_bytes = to_hex(&random.to_be_bytes()); + let cleaned_config_test_name = config.test_name.replace("::", "_"); + format!( + "/tmp/stacks-node-tests/units-test-consensus/{cleaned_config_test_name}-{random_bytes}" + ) + } + + pub fn make_test_path(config: &TestChainstateConfig) -> String { + let test_path = Self::test_path(config); + if fs::metadata(&test_path).is_ok() { + fs::remove_dir_all(&test_path).unwrap(); + }; + + fs::create_dir_all(&test_path).unwrap(); + test_path + } + + pub fn new_with_observer( + mut config: TestChainstateConfig, + observer: Option<&'a TestEventObserver>, + ) -> TestChainstate<'a> { + let test_path = Self::make_test_path(&config); + let chainstate_path = get_chainstate_path_str(&test_path); + let mut miner_factory = TestMinerFactory::new(); + miner_factory.chain_id = config.network_id; + let mut miner = miner_factory.next_miner( + config.burnchain.clone(), + 1, + 1, + AddressHashMode::SerializeP2PKH, + ); + // manually set fees + miner.test_with_tx_fees = false; + + config.burnchain.working_dir = get_burnchain(&test_path, None).working_dir; + + let epochs = config.epochs.clone().unwrap_or_else(|| { + StacksEpoch::unit_test_pre_2_05(config.burnchain.first_block_height) + }); + + let mut sortdb = SortitionDB::connect( + &config.burnchain.get_db_path(), + config.burnchain.first_block_height, + &config.burnchain.first_block_hash, + 0, + &epochs, + config.burnchain.pox_constants.clone(), + None, + true, + ) + .unwrap(); + + let first_burnchain_block_height = config.burnchain.first_block_height; + let first_burnchain_block_hash = config.burnchain.first_block_hash.clone(); + + let _burnchain_blocks_db = BurnchainDB::connect( + &config.burnchain.get_burnchaindb_path(), + &config.burnchain, + true, + ) + .unwrap(); + + let agg_pub_key_opt = config.aggregate_public_key.clone(); + + let conf = config.clone(); + let post_flight_callback = move |clarity_tx: &mut ClarityTx| { + let mut receipts = vec![]; + + if let Some(agg_pub_key) = agg_pub_key_opt { + debug!("Setting aggregate public key to {}", &to_hex(&agg_pub_key)); + NakamotoChainState::aggregate_public_key_bootcode(clarity_tx, agg_pub_key); + } else { + debug!("Not setting aggregate public key"); + } + // add test-specific boot code + if !conf.setup_code.is_empty() { + let receipt = clarity_tx.connection().as_transaction(|clarity| { + let boot_code_addr = boot_code_test_addr(); + let boot_code_account = StacksAccount { + principal: boot_code_addr.to_account_principal(), + nonce: 0, + stx_balance: STXBalance::zero(), + }; + + let boot_code_auth 
= boot_code_tx_auth(boot_code_addr.clone()); + + debug!( + "Instantiate test-specific boot code contract '{}.{}' ({} bytes)...", + &boot_code_addr.to_string(), + &conf.test_name, + conf.setup_code.len() + ); + + let smart_contract = TransactionPayload::SmartContract( + TransactionSmartContract { + name: ContractName::try_from( + conf.test_name + .replace("::", "-") + .chars() + .skip( + conf.test_name + .len() + .saturating_sub(CONTRACT_MAX_NAME_LENGTH), + ) + .collect::() + .trim_start_matches(|c: char| !c.is_alphabetic()) + .to_string(), + ) + .expect("FATAL: invalid boot-code contract name"), + code_body: StacksString::from_str(&conf.setup_code) + .expect("FATAL: invalid boot code body"), + }, + None, + ); + + let boot_code_smart_contract = StacksTransaction::new( + TransactionVersion::Testnet, + boot_code_auth, + smart_contract, + ); + StacksChainState::process_transaction_payload( + clarity, + &boot_code_smart_contract, + &boot_code_account, + None, + ) + .unwrap() + }); + receipts.push(receipt); + } + debug!("Bootup receipts: {receipts:?}"); + }; + + let mut boot_data = ChainStateBootData::new( + &config.burnchain, + config.initial_balances.clone(), + Some(Box::new(post_flight_callback)), + ); + + if !config.initial_lockups.is_empty() { + let lockups = config.initial_lockups.clone(); + boot_data.get_bulk_initial_lockups = + Some(Box::new(move || Box::new(lockups.into_iter()))); + } + + let (chainstate, _) = StacksChainState::open_and_exec( + false, + config.network_id, + &chainstate_path, + Some(&mut boot_data), + None, + ) + .unwrap(); + + let indexer = BitcoinIndexer::new_unit_test(&config.burnchain.working_dir); + let mut coord = ChainsCoordinator::test_new_full( + &config.burnchain, + config.network_id, + &test_path, + OnChainRewardSetProvider(observer), + observer, + indexer, + None, + config.txindex, + ); + coord.handle_new_burnchain_block().unwrap(); + + let mut stacks_node = TestStacksNode::from_chainstate(chainstate); + + { + // pre-populate burnchain, if running on bitcoin + let prev_snapshot = SortitionDB::get_first_block_snapshot(sortdb.conn()).unwrap(); + let mut fork = TestBurnchainFork::new( + prev_snapshot.block_height, + &prev_snapshot.burn_header_hash, + &prev_snapshot.index_root, + 0, + ); + for i in prev_snapshot.block_height..config.current_block { + let burn_block = { + let ic = sortdb.index_conn(); + let mut burn_block = fork.next_block(&ic); + stacks_node.add_key_register(&mut burn_block, &mut miner); + burn_block + }; + fork.append_block(burn_block); + + fork.mine_pending_blocks_pox(&mut sortdb, &config.burnchain, &mut coord); + } + } + + let indexer = BitcoinIndexer::new_unit_test(&config.burnchain.working_dir); + + TestChainstate { + config, + sortdb: Some(sortdb), + miner, + stacks_node: Some(stacks_node), + test_path, + chainstate_path, + coord, + indexer: Some(indexer), + nakamoto_parent_tenure_opt: None, + malleablized_blocks: vec![], + mine_malleablized_blocks: true, + } + } + + // Advances a TestChainstate to the Nakamoto epoch + pub fn advance_to_nakamoto_epoch(&mut self, private_key: &StacksPrivateKey, nonce: &mut usize) { + let addr = StacksAddress::p2pkh(false, &StacksPublicKey::from_private(private_key)); + let default_pox_addr = + PoxAddress::from_legacy(AddressHashMode::SerializeP2PKH, addr.bytes().clone()); + + let mut sortition_height = self.get_burn_block_height(); + debug!("\n\n======================"); + debug!("PoxConstants = {:#?}", &self.config.burnchain.pox_constants); + debug!("tip = {sortition_height}"); + 
debug!("========================\n\n"); + + let epoch_25_height = self + .config + .epochs + .as_ref() + .unwrap() + .iter() + .find(|e| e.epoch_id == StacksEpochId::Epoch25) + .unwrap() + .start_height; + + let epoch_30_height = self + .config + .epochs + .as_ref() + .unwrap() + .iter() + .find(|e| e.epoch_id == StacksEpochId::Epoch30) + .unwrap() + .start_height; + + // Advance to just past PoX-4 instantiation + let mut blocks_produced = false; + while sortition_height <= epoch_25_height { + self.tenure_with_txs(&[], nonce); + sortition_height = self.get_burn_block_height(); + blocks_produced = true; + } + + // Ensure at least one block is produced before PoX-4 lockups + if !blocks_produced { + self.tenure_with_txs(&[], nonce); + sortition_height = self.get_burn_block_height(); + } + + debug!("\n\n======================"); + debug!("Make PoX-4 lockups"); + debug!("========================\n\n"); + + let reward_cycle = self + .config + .burnchain + .block_height_to_reward_cycle(sortition_height) + .unwrap(); + + // Create PoX-4 lockup transactions + let stack_txs: Vec<_> = self + .config + .test_stackers + .clone() + .unwrap_or_default() + .iter() + .map(|test_stacker| { + let pox_addr = test_stacker + .pox_addr + .clone() + .unwrap_or(default_pox_addr.clone()); + let max_amount = test_stacker.max_amount.unwrap_or(u128::MAX); + let signature = make_pox_4_signer_key_signature( + &pox_addr, + &test_stacker.signer_private_key, + reward_cycle.into(), + &Pox4SignatureTopic::StackStx, + self.config.network_id, + 12, + max_amount, + 1, + ) + .unwrap() + .to_rsv(); + make_pox_4_lockup_chain_id( + &test_stacker.stacker_private_key, + 0, + test_stacker.amount, + &pox_addr, + 12, + &StacksPublicKey::from_private(&test_stacker.signer_private_key), + sortition_height + 1, + Some(signature), + max_amount, + 1, + self.config.network_id, + ) + }) + .collect(); + + let stacks_block = self.tenure_with_txs(&stack_txs, nonce); + let (stacks_tip_ch, stacks_tip_bh) = + SortitionDB::get_canonical_stacks_chain_tip_hash(self.sortdb().conn()).unwrap(); + let stacks_tip = StacksBlockId::new(&stacks_tip_ch, &stacks_tip_bh); + assert_eq!(stacks_block, stacks_tip); + + debug!("\n\n======================"); + debug!("Advance to the Prepare Phase"); + debug!("========================\n\n"); + + // Advance to the prepare phase + while !self.config.burnchain.is_in_prepare_phase(sortition_height) { + let (stacks_tip_ch, stacks_tip_bh) = + SortitionDB::get_canonical_stacks_chain_tip_hash(self.sortdb().conn()).unwrap(); + let old_tip = StacksBlockId::new(&stacks_tip_ch, &stacks_tip_bh); + let stacks_block = self.tenure_with_txs(&[], nonce); + let (stacks_tip_ch, stacks_tip_bh) = + SortitionDB::get_canonical_stacks_chain_tip_hash(self.sortdb().conn()).unwrap(); + let stacks_tip = StacksBlockId::new(&stacks_tip_ch, &stacks_tip_bh); + assert_ne!(old_tip, stacks_tip); + sortition_height = self.get_burn_block_height(); + } + + debug!("\n\n======================"); + debug!("Advance to Epoch 3.0"); + debug!("========================\n\n"); + + // Advance to Epoch 3.0 + while sortition_height < epoch_30_height - 1 { + let (stacks_tip_ch, stacks_tip_bh) = + SortitionDB::get_canonical_stacks_chain_tip_hash(self.sortdb().conn()).unwrap(); + let old_tip = StacksBlockId::new(&stacks_tip_ch, &stacks_tip_bh); + self.tenure_with_txs(&[], nonce); + let (stacks_tip_ch, stacks_tip_bh) = + SortitionDB::get_canonical_stacks_chain_tip_hash(self.sortdb().conn()).unwrap(); + let stacks_tip = StacksBlockId::new(&stacks_tip_ch, &stacks_tip_bh); + 
assert_ne!(old_tip, stacks_tip); + sortition_height = self.get_burn_block_height(); + } + + debug!("\n\n======================"); + debug!("Welcome to Nakamoto!"); + debug!("========================\n\n"); + } + + pub fn get_burnchain_db(&self, readwrite: bool) -> BurnchainDB { + BurnchainDB::open(&self.config.burnchain.get_burnchaindb_path(), readwrite).unwrap() + } + + pub fn get_sortition_at_height(&self, height: u64) -> Option<BlockSnapshot> { + let sortdb = self.sortdb.as_ref().unwrap(); + let tip = SortitionDB::get_canonical_burn_chain_tip(sortdb.conn()).unwrap(); + let sort_handle = sortdb.index_handle(&tip.sortition_id); + sort_handle.get_block_snapshot_by_height(height).unwrap() + } + + pub fn get_burnchain_block_ops( + &self, + burn_block_hash: &BurnchainHeaderHash, + ) -> Vec<BlockstackOperationType> { + let burnchain_db = + BurnchainDB::open(&self.config.burnchain.get_burnchaindb_path(), false).unwrap(); + burnchain_db + .get_burnchain_block_ops(burn_block_hash) + .unwrap() + } + + pub fn get_burnchain_block_ops_at_height( + &self, + height: u64, + ) -> Option<Vec<BlockstackOperationType>> { + let sortdb = self.sortdb.as_ref().unwrap(); + let tip = SortitionDB::get_canonical_burn_chain_tip(sortdb.conn()).unwrap(); + let sort_handle = sortdb.index_handle(&tip.sortition_id); + let Some(sn) = sort_handle.get_block_snapshot_by_height(height).unwrap() else { + return None; + }; + Some(self.get_burnchain_block_ops(&sn.burn_header_hash)) + } + + pub fn next_burnchain_block( + &mut self, + blockstack_ops: Vec<BlockstackOperationType>, + ) -> (u64, BurnchainHeaderHash, ConsensusHash) { + let x = self.inner_next_burnchain_block(blockstack_ops, true, true, true, false); + (x.0, x.1, x.2) + } + + pub fn next_burnchain_block_diverge( + &mut self, + blockstack_ops: Vec<BlockstackOperationType>, + ) -> (u64, BurnchainHeaderHash, ConsensusHash) { + let x = self.inner_next_burnchain_block(blockstack_ops, true, true, true, true); + (x.0, x.1, x.2) + } + + pub fn next_burnchain_block_and_missing_pox_anchor( + &mut self, + blockstack_ops: Vec<BlockstackOperationType>, + ) -> ( + u64, + BurnchainHeaderHash, + ConsensusHash, + Option<BlockHeaderHash>, + ) { + self.inner_next_burnchain_block(blockstack_ops, true, true, true, false) + } + + pub fn next_burnchain_block_raw( + &mut self, + blockstack_ops: Vec<BlockstackOperationType>, + ) -> (u64, BurnchainHeaderHash, ConsensusHash) { + let x = self.inner_next_burnchain_block(blockstack_ops, false, false, true, false); + (x.0, x.1, x.2) + } + + pub fn next_burnchain_block_raw_sortition_only( + &mut self, + blockstack_ops: Vec<BlockstackOperationType>, + ) -> (u64, BurnchainHeaderHash, ConsensusHash) { + let x = self.inner_next_burnchain_block(blockstack_ops, false, false, false, false); + (x.0, x.1, x.2) + } + + pub fn next_burnchain_block_raw_and_missing_pox_anchor( + &mut self, + blockstack_ops: Vec<BlockstackOperationType>, + ) -> ( + u64, + BurnchainHeaderHash, + ConsensusHash, + Option<BlockHeaderHash>, + ) { + self.inner_next_burnchain_block(blockstack_ops, false, false, true, false) + } + + pub fn set_ops_consensus_hash( + blockstack_ops: &mut Vec<BlockstackOperationType>, + ch: &ConsensusHash, + ) { + for op in blockstack_ops.iter_mut() { + if let BlockstackOperationType::LeaderKeyRegister(ref mut data) = op { + data.consensus_hash = (*ch).clone(); + } + } + } + + pub fn set_ops_burn_header_hash( + blockstack_ops: &mut Vec<BlockstackOperationType>, + bhh: &BurnchainHeaderHash, + ) { + for op in blockstack_ops.iter_mut() { + op.set_burn_header_hash(bhh.clone()); + } + } + + pub fn make_next_burnchain_block( + burnchain: &Burnchain, + tip_block_height: u64, + tip_block_hash: &BurnchainHeaderHash, + num_ops: u64, + ops_determine_block_header: bool, + ) -> BurnchainBlockHeader { + test_debug!( + "make_next_burnchain_block: 
tip_block_height={tip_block_height} tip_block_hash={tip_block_hash} num_ops={num_ops}" + ); + let indexer = BitcoinIndexer::new_unit_test(&burnchain.working_dir); + let parent_hdr = indexer + .read_burnchain_header(tip_block_height) + .unwrap() + .unwrap(); + + test_debug!("parent hdr ({tip_block_height}): {parent_hdr:?}"); + assert_eq!(&parent_hdr.block_hash, tip_block_hash); + + let now = BURNCHAIN_TEST_BLOCK_TIME; + let block_header_hash = BurnchainHeaderHash::from_bitcoin_hash( + &BitcoinIndexer::mock_bitcoin_header( + &parent_hdr.block_hash, + (now as u32) + + if ops_determine_block_header { + num_ops as u32 + } else { + 0 + }, + ) + .bitcoin_hash(), + ); + test_debug!( + "Block header hash at {} is {block_header_hash}", + tip_block_height + 1 + ); + + BurnchainBlockHeader { + block_height: tip_block_height + 1, + block_hash: block_header_hash.clone(), + parent_block_hash: parent_hdr.block_hash.clone(), + num_txs: num_ops, + timestamp: now, + } + } + + pub fn add_burnchain_block( + burnchain: &Burnchain, + block_header: &BurnchainBlockHeader, + blockstack_ops: Vec, + ) { + let mut burnchain_db = BurnchainDB::open(&burnchain.get_burnchaindb_path(), true).unwrap(); + + let mut indexer = BitcoinIndexer::new_unit_test(&burnchain.working_dir); + + test_debug!( + "Store header and block ops for {}-{} ({})", + &block_header.block_hash, + &block_header.parent_block_hash, + block_header.block_height + ); + indexer.raw_store_header(block_header.clone()).unwrap(); + burnchain_db + .raw_store_burnchain_block(burnchain, &indexer, block_header.clone(), blockstack_ops) + .unwrap(); + } + + /// Generate and commit the next burnchain block with the given block operations. + /// * if `set_consensus_hash` is true, then each op's consensus_hash field will be set to + /// that of the resulting block snapshot. + /// * if `set_burn_hash` is true, then each op's burnchain header hash field will be set to + /// that of the resulting block snapshot. 
+ /// + /// Returns ( + /// burnchain tip block height, + /// burnchain tip block hash, + /// burnchain tip consensus hash, + /// Option<missing PoX anchor block hash> + /// ) + fn inner_next_burnchain_block( + &mut self, + mut blockstack_ops: Vec<BlockstackOperationType>, + set_consensus_hash: bool, + set_burn_hash: bool, + update_burnchain: bool, + ops_determine_block_header: bool, + ) -> ( + u64, + BurnchainHeaderHash, + ConsensusHash, + Option<BlockHeaderHash>, + ) { + let sortdb = self.sortdb.take().unwrap(); + let (block_height, block_hash, epoch_id) = { + let tip = SortitionDB::get_canonical_burn_chain_tip(sortdb.conn()).unwrap(); + let epoch_id = SortitionDB::get_stacks_epoch(sortdb.conn(), tip.block_height + 1) + .unwrap() + .unwrap() + .epoch_id; + + if set_consensus_hash { + Self::set_ops_consensus_hash(&mut blockstack_ops, &tip.consensus_hash); + } + + let block_header = Self::make_next_burnchain_block( + &self.config.burnchain, + tip.block_height, + &tip.burn_header_hash, + blockstack_ops.len() as u64, + ops_determine_block_header, + ); + + if set_burn_hash { + Self::set_ops_burn_header_hash(&mut blockstack_ops, &block_header.block_hash); + } + + if update_burnchain { + Self::add_burnchain_block( + &self.config.burnchain, + &block_header, + blockstack_ops.clone(), + ); + } + (block_header.block_height, block_header.block_hash, epoch_id) + }; + + let missing_pox_anchor_block_hash_opt = if epoch_id < StacksEpochId::Epoch30 { + self.coord + .handle_new_burnchain_block() + .unwrap() + .into_missing_block_hash() + } else if self.coord.handle_new_nakamoto_burnchain_block().unwrap() { + None + } else { + Some(BlockHeaderHash([0x00; 32])) + }; + + let pox_id = { + let ic = sortdb.index_conn(); + let tip_sort_id = SortitionDB::get_canonical_sortition_tip(sortdb.conn()).unwrap(); + let sortdb_reader = SortitionHandleConn::open_reader(&ic, &tip_sort_id).unwrap(); + sortdb_reader.get_pox_id().unwrap() + }; + + test_debug!("\n\nafter burn block {block_hash:?}, tip PoX ID is {pox_id:?}\n\n"); + + let tip = SortitionDB::get_canonical_burn_chain_tip(sortdb.conn()).unwrap(); + self.sortdb = Some(sortdb); + ( + block_height, + block_hash, + tip.consensus_hash, + missing_pox_anchor_block_hash_opt, + ) + } + + /// Pre-process an epoch 2.x Stacks block. + /// Validate it and store it to staging. 
+ pub fn preprocess_stacks_block(&mut self, block: &StacksBlock) -> Result { + let sortdb = self.sortdb.take().unwrap(); + let mut node = self.stacks_node.take().unwrap(); + let res = { + let sn = { + let ic = sortdb.index_conn(); + let tip = SortitionDB::get_canonical_burn_chain_tip(&ic).unwrap(); + let sn_opt = SortitionDB::get_block_snapshot_for_winning_stacks_block( + &ic, + &tip.sortition_id, + &block.block_hash(), + ) + .unwrap(); + if sn_opt.is_none() { + return Err(format!( + "No such block in canonical burn fork: {}", + &block.block_hash() + )); + } + sn_opt.unwrap() + }; + + let parent_sn = { + let db_handle = sortdb.index_handle(&sn.sortition_id); + let parent_sn = db_handle + .get_block_snapshot(&sn.parent_burn_header_hash) + .unwrap(); + parent_sn.unwrap() + }; + + let ic = sortdb.index_conn(); + node.chainstate + .preprocess_anchored_block( + &ic, + &sn.consensus_hash, + block, + &parent_sn.consensus_hash, + 5, + ) + .map_err(|e| format!("Failed to preprocess anchored block: {e:?}")) + }; + if res.is_ok() { + let pox_id = { + let ic = sortdb.index_conn(); + let tip_sort_id = SortitionDB::get_canonical_sortition_tip(sortdb.conn()).unwrap(); + let sortdb_reader = SortitionHandleConn::open_reader(&ic, &tip_sort_id).unwrap(); + sortdb_reader.get_pox_id().unwrap() + }; + test_debug!( + "\n\n{:?}: after stacks block {:?}, tip PoX ID is {pox_id:?}\n\n", + &block.block_hash(), + &pox_id + ); + self.coord.handle_new_stacks_block().unwrap(); + } + + self.sortdb = Some(sortdb); + self.stacks_node = Some(node); + res + } + + /// Preprocess epoch 2.x microblocks. + /// Validate them and store them to staging. + pub fn preprocess_stacks_microblocks( + &mut self, + microblocks: &[StacksMicroblock], + ) -> Result { + assert!(!microblocks.is_empty()); + let sortdb = self.sortdb.take().unwrap(); + let mut node = self.stacks_node.take().unwrap(); + let res = { + let anchor_block_hash = microblocks[0].header.prev_block.clone(); + let sn = { + let ic = sortdb.index_conn(); + let tip = SortitionDB::get_canonical_burn_chain_tip(&ic).unwrap(); + let sn_opt = SortitionDB::get_block_snapshot_for_winning_stacks_block( + &ic, + &tip.sortition_id, + &anchor_block_hash, + ) + .unwrap(); + if sn_opt.is_none() { + return Err(format!( + "No such block in canonical burn fork: {anchor_block_hash}" + )); + } + sn_opt.unwrap() + }; + + let mut res = Ok(true); + for mblock in microblocks.iter() { + res = node + .chainstate + .preprocess_streamed_microblock(&sn.consensus_hash, &anchor_block_hash, mblock) + .map_err(|e| format!("Failed to preprocess microblock: {e:?}")); + + if res.is_err() { + break; + } + } + res + }; + + self.sortdb = Some(sortdb); + self.stacks_node = Some(node); + res + } + + /// Store the given epoch 2.x Stacks block and microblock to staging, and then try and + /// process them. 
+ pub fn process_stacks_epoch_at_tip( + &mut self, + block: &StacksBlock, + microblocks: &[StacksMicroblock], + ) { + let sortdb = self.sortdb.take().unwrap(); + let mut node = self.stacks_node.take().unwrap(); + { + let ic = sortdb.index_conn(); + let tip = SortitionDB::get_canonical_burn_chain_tip(&ic).unwrap(); + node.chainstate + .preprocess_stacks_epoch(&ic, &tip, block, microblocks) + .unwrap(); + } + self.coord.handle_new_stacks_block().unwrap(); + + let pox_id = { + let ic = sortdb.index_conn(); + let tip_sort_id = SortitionDB::get_canonical_sortition_tip(sortdb.conn()).unwrap(); + let sortdb_reader = SortitionHandleConn::open_reader(&ic, &tip_sort_id).unwrap(); + sortdb_reader.get_pox_id().unwrap() + }; + test_debug!( + "\n\nafter stacks block {:?}, tip PoX ID is {pox_id:?}\n\n", + &block.block_hash() + ); + + self.sortdb = Some(sortdb); + self.stacks_node = Some(node); + } + + /// Store the given epoch 2.x Stacks block and microblock to the given node's staging, + /// using the given sortition DB as well, and then try and process them. + fn inner_process_stacks_epoch_at_tip( + &mut self, + sortdb: &SortitionDB, + node: &mut TestStacksNode, + block: &StacksBlock, + microblocks: &[StacksMicroblock], + ) -> Result<(), CoordinatorError> { + { + let ic = sortdb.index_conn(); + let tip = SortitionDB::get_canonical_burn_chain_tip(&ic)?; + node.chainstate + .preprocess_stacks_epoch(&ic, &tip, block, microblocks)?; + } + self.coord.handle_new_stacks_block()?; + + let pox_id = { + let ic = sortdb.index_conn(); + let tip_sort_id = SortitionDB::get_canonical_sortition_tip(sortdb.conn())?; + let sortdb_reader = SortitionHandleConn::open_reader(&ic, &tip_sort_id)?; + sortdb_reader.get_pox_id()? + }; + test_debug!( + "\n\nafter stacks block {:?}, tip PoX ID is {:?}\n\n", + &block.block_hash(), + &pox_id + ); + Ok(()) + } + + /// Store the given epoch 2.x Stacks block and microblock to the given node's staging, + /// and then try and process them. + pub fn process_stacks_epoch_at_tip_checked( + &mut self, + block: &StacksBlock, + microblocks: &[StacksMicroblock], + ) -> Result<(), CoordinatorError> { + let sortdb = self.sortdb.take().unwrap(); + let mut node = self.stacks_node.take().unwrap(); + let res = self.inner_process_stacks_epoch_at_tip(&sortdb, &mut node, block, microblocks); + self.sortdb = Some(sortdb); + self.stacks_node = Some(node); + res + } + + /// Accept a new Stacks block and microblocks via the relayer, and then try to process + /// them. 
+ pub fn process_stacks_epoch( + &mut self, + block: &StacksBlock, + consensus_hash: &ConsensusHash, + microblocks: &[StacksMicroblock], + ) { + let sortdb = self.sortdb.take().unwrap(); + let mut node = self.stacks_node.take().unwrap(); + { + let ic = sortdb.index_conn(); + Relayer::process_new_anchored_block( + &ic, + &mut node.chainstate, + consensus_hash, + block, + 0, + ) + .unwrap(); + + let block_hash = block.block_hash(); + for mblock in microblocks.iter() { + node.chainstate + .preprocess_streamed_microblock(consensus_hash, &block_hash, mblock) + .unwrap(); + } + } + self.coord.handle_new_stacks_block().unwrap(); + + let pox_id = { + let ic = sortdb.index_conn(); + let tip_sort_id = SortitionDB::get_canonical_sortition_tip(sortdb.conn()).unwrap(); + let sortdb_reader = SortitionHandleConn::open_reader(&ic, &tip_sort_id).unwrap(); + sortdb_reader.get_pox_id().unwrap() + }; + + test_debug!( + "\n\nafter stacks block {:?}, tip PoX ID is {:?}\n\n", + &block.block_hash(), + &pox_id + ); + + self.sortdb = Some(sortdb); + self.stacks_node = Some(node); + } + + pub fn add_empty_burnchain_block(&mut self) -> (u64, BurnchainHeaderHash, ConsensusHash) { + self.next_burnchain_block(vec![]) + } + + pub fn mine_empty_tenure(&mut self) -> (u64, BurnchainHeaderHash, ConsensusHash) { + let (burn_ops, ..) = self.begin_nakamoto_tenure(TenureChangeCause::BlockFound); + let result = self.next_burnchain_block(burn_ops); + // remove the last block commit so that the testpeer doesn't try to build off of this tenure + self.miner.block_commits.pop(); + result + } + + pub fn chainstate(&mut self) -> &mut StacksChainState { + &mut self.stacks_node.as_mut().unwrap().chainstate + } + + pub fn chainstate_ref(&self) -> &StacksChainState { + &self.stacks_node.as_ref().unwrap().chainstate + } + + pub fn sortdb(&mut self) -> &mut SortitionDB { + self.sortdb.as_mut().unwrap() + } + + pub fn sortdb_ref(&self) -> &SortitionDB { + self.sortdb.as_ref().unwrap() + } + + pub fn stacks_node(&mut self) -> &mut TestStacksNode { + self.stacks_node.as_mut().unwrap() + } + + pub fn stacks_node_ref(&self) -> &TestStacksNode { + self.stacks_node.as_ref().unwrap() + } + + /// Make a tenure with the given transactions. Creates a coinbase tx with the given nonce, and then increments + /// the provided reference. 
+ pub fn tenure_with_txs( + &mut self, + txs: &[StacksTransaction], + coinbase_nonce: &mut usize, + ) -> StacksBlockId { + let microblock_privkey = self.miner.next_microblock_privkey(); + let microblock_pubkeyhash = + Hash160::from_node_public_key(&StacksPublicKey::from_private(&microblock_privkey)); + let tip = SortitionDB::get_canonical_burn_chain_tip(self.sortdb.as_ref().unwrap().conn()) + .unwrap(); + let burnchain = self.config.burnchain.clone(); + + let (burn_ops, stacks_block, microblocks) = self.make_tenure( + |ref mut miner, + ref mut sortdb, + ref mut chainstate, + vrf_proof, + ref parent_opt, + ref parent_microblock_header_opt| { + let parent_tip = get_parent_tip(parent_opt, chainstate, sortdb); + let coinbase_tx = make_coinbase(miner, *coinbase_nonce); + + let mut block_txs = vec![coinbase_tx]; + block_txs.extend_from_slice(txs); + + let block_builder = StacksBlockBuilder::make_regtest_block_builder( + &burnchain, + &parent_tip, + vrf_proof, + tip.total_burn, + &microblock_pubkeyhash, + ) + .unwrap(); + let (anchored_block, _size, _cost) = + StacksBlockBuilder::make_anchored_block_from_txs( + block_builder, + chainstate, + &sortdb.index_handle(&tip.sortition_id), + block_txs, + ) + .unwrap(); + (anchored_block, vec![]) + }, + ); + + let (_, _, consensus_hash) = self.next_burnchain_block(burn_ops); + self.process_stacks_epoch_at_tip(&stacks_block, &microblocks); + + *coinbase_nonce += 1; + + StacksBlockId::new(&consensus_hash, &stacks_block.block_hash()) + } + + /// Make a tenure, using `tenure_builder` to generate a Stacks block and a list of + /// microblocks. + pub fn make_tenure<F>( + &mut self, + mut tenure_builder: F, + ) -> ( + Vec<BlockstackOperationType>, + StacksBlock, + Vec<StacksMicroblock>, + ) + where + F: FnMut( + &mut TestMiner, + &mut SortitionDB, + &mut StacksChainState, + &VRFProof, + Option<&StacksBlock>, + Option<&StacksMicroblockHeader>, + ) -> (StacksBlock, Vec<StacksMicroblock>), + { + let mut sortdb = self.sortdb.take().unwrap(); + let tip = SortitionDB::get_canonical_burn_chain_tip(sortdb.conn()).unwrap(); + + let mut burn_block = TestBurnchainBlock::new(&tip, 0); + let mut stacks_node = self.stacks_node.take().unwrap(); + + let parent_block_opt = stacks_node.get_last_anchored_block(&self.miner); + let parent_sortition_opt = parent_block_opt.as_ref().and_then(|parent_block| { + let ic = sortdb.index_conn(); + SortitionDB::get_block_snapshot_for_winning_stacks_block( + &ic, + &tip.sortition_id, + &parent_block.block_hash(), + ) + .unwrap() + }); + + let parent_microblock_header_opt = + get_last_microblock_header(&stacks_node, &self.miner, parent_block_opt.as_ref()); + let last_key = stacks_node.get_last_key(&self.miner); + + let network_id = self.config.network_id; + let chainstate_path = get_chainstate_path_str(&self.config.test_name); + let burn_block_height = burn_block.block_height; + + let proof = self + .miner + .make_proof( + &last_key.public_key, + &burn_block.parent_snapshot.sortition_hash, + ) + .unwrap_or_else(|| panic!("FATAL: no private key for {:?}", last_key.public_key)); + + let (stacks_block, microblocks) = tenure_builder( + &mut self.miner, + &mut sortdb, + &mut stacks_node.chainstate, + &proof, + parent_block_opt.as_ref(), + parent_microblock_header_opt.as_ref(), + ); + + let mut block_commit_op = stacks_node.make_tenure_commitment( + &sortdb, + &mut burn_block, + &mut self.miner, + &stacks_block, + microblocks.clone(), + 1000, + &last_key, + parent_sortition_opt.as_ref(), + ); + + // patch up block-commit -- these blocks all mine off of genesis + if stacks_block.header.parent_block == BlockHeaderHash([0u8; 32]) 
{ + block_commit_op.parent_block_ptr = 0; + block_commit_op.parent_vtxindex = 0; + } + + let leader_key_op = stacks_node.add_key_register(&mut burn_block, &mut self.miner); + + // patch in reward set info + let recipients = get_next_recipients( + &tip, + &mut stacks_node.chainstate, + &mut sortdb, + &self.config.burnchain, + &OnChainRewardSetProvider::new(), + ) + .unwrap_or_else(|e| panic!("Failure fetching recipient set: {e:?}")); + block_commit_op.commit_outs = match recipients { + Some(info) => { + let mut recipients = info + .recipients + .into_iter() + .map(|x| x.0) + .collect::>(); + if recipients.len() == 1 { + recipients.push(PoxAddress::standard_burn_address(false)); + } + recipients + } + None => { + if self + .config + .burnchain + .is_in_prepare_phase(burn_block.block_height) + { + vec![PoxAddress::standard_burn_address(false)] + } else { + vec![ + PoxAddress::standard_burn_address(false), + PoxAddress::standard_burn_address(false), + ] + } + } + }; + test_debug!( + "Block commit at height {} has {} recipients: {:?}", + block_commit_op.block_height, + block_commit_op.commit_outs.len(), + &block_commit_op.commit_outs + ); + + self.stacks_node = Some(stacks_node); + self.sortdb = Some(sortdb); + ( + vec![ + BlockstackOperationType::LeaderKeyRegister(leader_key_op), + BlockstackOperationType::LeaderBlockCommit(block_commit_op), + ], + stacks_block, + microblocks, + ) + } + + pub fn get_burn_block_height(&self) -> u64 { + SortitionDB::get_canonical_burn_chain_tip( + self.sortdb.as_ref().expect("Failed to get sortdb").conn(), + ) + .expect("Failed to get canonical burn chain tip") + .block_height + } + + pub fn get_reward_cycle(&self) -> u64 { + let block_height = self.get_burn_block_height(); + self.config + .burnchain + .block_height_to_reward_cycle(block_height) + .unwrap_or_else(|| panic!("Failed to get reward cycle for block height {block_height}")) + } + + /// Start the next Nakamoto tenure. + /// This generates the VRF key and block-commit txs, as well as the TenureChange and + /// leader key this commit references + pub fn begin_nakamoto_tenure( + &mut self, + tenure_change_cause: TenureChangeCause, + ) -> ( + Vec, + TenureChangePayload, + LeaderKeyRegisterOp, + ) { + let mut sortdb = self.sortdb.take().unwrap(); + let tip = SortitionDB::get_canonical_burn_chain_tip(sortdb.conn()).unwrap(); + + let mut burn_block = TestBurnchainBlock::new(&tip, 0); + let mut stacks_node = self.stacks_node.take().unwrap(); + + let (last_tenure_id, parent_block_opt, parent_tenure_opt) = + if let Some(nakamoto_parent_tenure) = self.nakamoto_parent_tenure_opt.as_ref() { + ( + nakamoto_parent_tenure.first().as_ref().unwrap().block_id(), + None, + Some(nakamoto_parent_tenure.clone()), + ) + } else { + get_nakamoto_parent(&self.miner, &stacks_node, &sortdb) + }; + + // find the VRF leader key register tx to use. 
+ // it's the one pointed to by the parent tenure + let parent_consensus_hash_and_tenure_start_id_opt = + if let Some(parent_tenure) = parent_tenure_opt.as_ref() { + let tenure_start_block = parent_tenure.first().unwrap(); + Some(( + tenure_start_block.header.consensus_hash.clone(), + tenure_start_block.block_id(), + )) + } else if let Some(parent_block) = parent_block_opt.as_ref() { + let parent_header_info = + StacksChainState::get_stacks_block_header_info_by_index_block_hash( + stacks_node.chainstate.db(), + &last_tenure_id, + ) + .unwrap() + .unwrap(); + Some(( + parent_header_info.consensus_hash.clone(), + parent_header_info.index_block_hash(), + )) + } else { + None + }; + + let (ch, parent_tenure_start_block_id) = parent_consensus_hash_and_tenure_start_id_opt + .clone() + .expect("No leader key"); + // it's possible that the parent was a shadow block. + // if so, find the highest non-shadow ancestor's block-commit, so we can + let mut cursor = ch; + let (tenure_sn, tenure_block_commit) = loop { + let tenure_sn = SortitionDB::get_block_snapshot_consensus(sortdb.conn(), &cursor) + .unwrap() + .unwrap(); + + let Some(tenure_block_commit) = get_block_commit_by_txid( + sortdb.conn(), + &tenure_sn.sortition_id, + &tenure_sn.winning_block_txid, + ) + .unwrap() else { + // parent must be a shadow block + let header = NakamotoChainState::get_block_header_nakamoto( + stacks_node.chainstate.db(), + &parent_tenure_start_block_id, + ) + .unwrap() + .unwrap() + .anchored_header + .as_stacks_nakamoto() + .cloned() + .unwrap(); + + assert!(header.is_shadow_block(), "Parent tenure start block ID {parent_tenure_start_block_id} has no block-commit and is not a shadow block"); + + cursor = stacks_node + .chainstate + .index_conn() + .get_parent_tenure_consensus_hash(&parent_tenure_start_block_id, &cursor) + .unwrap() + .unwrap(); + + continue; + }; + break (tenure_sn, tenure_block_commit); + }; + + let last_key = SortitionDB::get_leader_key_at( + &sortdb.index_conn(), + tenure_block_commit.key_block_ptr.into(), + tenure_block_commit.key_vtxindex.into(), + &tenure_sn.sortition_id, + ) + .unwrap() + .unwrap(); + + let network_id = self.config.network_id; + let chainstate_path = self.chainstate_path.clone(); + let burn_block_height = burn_block.block_height; + + let (mut block_commit_op, tenure_change_payload) = stacks_node.begin_nakamoto_tenure( + &sortdb, + &mut self.miner, + &mut burn_block, + &last_key, + parent_block_opt.as_ref(), + parent_tenure_opt.as_deref(), + 1000, + tenure_change_cause, + ); + + // patch up block-commit -- these blocks all mine off of genesis + if last_tenure_id == StacksBlockId(BOOT_BLOCK_HASH.0) { + block_commit_op.parent_block_ptr = 0; + block_commit_op.parent_vtxindex = 0; + } + + let mut burn_ops = vec![]; + if self.miner.last_VRF_public_key().is_none() { + let leader_key_op = stacks_node.add_key_register(&mut burn_block, &mut self.miner); + burn_ops.push(BlockstackOperationType::LeaderKeyRegister(leader_key_op)); + } + + // patch in reward set info + let recipients = get_nakamoto_next_recipients( + &tip, + &mut sortdb, + &mut stacks_node.chainstate, + &tenure_change_payload.previous_tenure_end, + &self.config.burnchain, + ) + .unwrap_or_else(|e| panic!("Failure fetching recipient set: {e:?}")); + block_commit_op.commit_outs = match recipients { + Some(info) => { + let mut recipients = info + .recipients + .into_iter() + .map(|x| x.0) + .collect::>(); + if recipients.len() == 1 { + recipients.push(PoxAddress::standard_burn_address(false)); + } + recipients + } + None => { 
+ if self + .config + .burnchain + .is_in_prepare_phase(burn_block.block_height) + { + vec![PoxAddress::standard_burn_address(false)] + } else { + vec![ + PoxAddress::standard_burn_address(false), + PoxAddress::standard_burn_address(false), + ] + } + } + }; + test_debug!( + "Block commit at height {} has {} recipients: {:?}", + block_commit_op.block_height, + block_commit_op.commit_outs.len(), + &block_commit_op.commit_outs + ); + + burn_ops.push(BlockstackOperationType::LeaderBlockCommit(block_commit_op)); + + // prepare to mine + let miner_addr = self.miner.origin_address().unwrap(); + let miner_account = get_account(&mut stacks_node.chainstate, &sortdb, &miner_addr); + self.miner.set_nonce(miner_account.nonce); + + self.stacks_node = Some(stacks_node); + self.sortdb = Some(sortdb); + (burn_ops, tenure_change_payload, last_key) + } + + /// Make the VRF proof for this tenure. + /// Call after processing the block-commit + pub fn make_nakamoto_vrf_proof(&mut self, miner_key: LeaderKeyRegisterOp) -> VRFProof { + let sortdb = self.sortdb.take().unwrap(); + let tip = SortitionDB::get_canonical_burn_chain_tip(sortdb.conn()).unwrap(); + let proof = self + .miner + .make_proof(&miner_key.public_key, &tip.sortition_hash) + .unwrap_or_else(|| panic!("FATAL: no private key for {:?}", miner_key.public_key)); + self.sortdb = Some(sortdb); + debug!( + "VRF proof made from {:?} over {}: {proof:?}", + miner_key.public_key, &tip.sortition_hash + ); + proof + } + + /// Produce and process a Nakamoto tenure, after processing the block-commit from + /// begin_nakamoto_tenure(). You'd process the burnchain ops from begin_nakamoto_tenure(), + /// take the consensus hash, and feed it in here. + /// + /// Returns the blocks, their sizes, and runtime costs + pub fn make_nakamoto_tenure( + &mut self, + tenure_change: StacksTransaction, + coinbase: StacksTransaction, + timestamp: Option, + ) -> Result, ChainstateError> { + let cycle = self.get_reward_cycle(); + let mut signers = self.config.test_signers.clone().unwrap_or_default(); + signers.generate_aggregate_key(cycle); + + let mut sortdb = self.sortdb.take().unwrap(); + let mut stacks_node = self.stacks_node.take().unwrap(); + let blocks = TestStacksNode::make_nakamoto_tenure_blocks( + &mut stacks_node.chainstate, + &mut sortdb, + &mut self.miner, + &mut signers, + &tenure_change + .try_as_tenure_change() + .unwrap() + .tenure_consensus_hash + .clone(), + Some(tenure_change), + Some(coinbase), + &mut self.coord, + |_| {}, + |_, _, _, _| vec![], + |_| true, + self.mine_malleablized_blocks, + self.nakamoto_parent_tenure_opt.is_none(), + timestamp, + )?; + + let just_blocks = blocks + .clone() + .into_iter() + .map(|(block, _, _, _)| block) + .collect(); + + stacks_node.add_nakamoto_tenure_blocks(just_blocks); + + let mut malleablized_blocks: Vec = blocks + .clone() + .into_iter() + .flat_map(|(_, _, _, malleablized)| malleablized) + .collect(); + + self.malleablized_blocks.append(&mut malleablized_blocks); + + let block_data = blocks + .into_iter() + .map(|(blk, sz, cost, _)| (blk, sz, cost)) + .collect(); + + self.sortdb = Some(sortdb); + self.stacks_node = Some(stacks_node); + Ok(block_data) + } +} diff --git a/stackslib/src/clarity_vm/tests/ephemeral.rs b/stackslib/src/clarity_vm/tests/ephemeral.rs index 73b5897212c..d3abcf0167b 100644 --- a/stackslib/src/clarity_vm/tests/ephemeral.rs +++ b/stackslib/src/clarity_vm/tests/ephemeral.rs @@ -430,8 +430,8 @@ fn test_ephemeral_nakamoto_block_replay_simple() { ); // read out all Nakamoto blocks - let sortdb = 
peer.sortdb.take().unwrap(); - let mut stacks_node = peer.stacks_node.take().unwrap(); + let sortdb = peer.chain.sortdb.take().unwrap(); + let mut stacks_node = peer.chain.stacks_node.take().unwrap(); let naka_tip = NakamotoChainState::get_canonical_block_header(stacks_node.chainstate.db(), &sortdb) .unwrap() @@ -733,8 +733,8 @@ fn test_ephemeral_nakamoto_block_replay_smart_contract() { let (mut peer, _other_peers) = plan.boot_into_nakamoto_peers(boot_tenures, Some(&observer)); // read out all Nakamoto blocks - let sortdb = peer.sortdb.take().unwrap(); - let mut stacks_node = peer.stacks_node.take().unwrap(); + let sortdb = peer.chain.sortdb.take().unwrap(); + let mut stacks_node = peer.chain.stacks_node.take().unwrap(); let naka_tip = NakamotoChainState::get_canonical_block_header(stacks_node.chainstate.db(), &sortdb) .unwrap() diff --git a/stackslib/src/net/api/tests/blockreplay.rs b/stackslib/src/net/api/tests/blockreplay.rs index 31e727727b1..6d5fc618752 100644 --- a/stackslib/src/net/api/tests/blockreplay.rs +++ b/stackslib/src/net/api/tests/blockreplay.rs @@ -71,7 +71,7 @@ fn test_block_reply_errors() { let test_observer = TestEventObserver::new(); let mut rpc_test = TestRPC::setup_nakamoto(function_name!(), &test_observer); - let sort_db = rpc_test.peer_1.sortdb.take().unwrap(); + let sort_db = rpc_test.peer_1.chain.sortdb.take().unwrap(); let chainstate = rpc_test.peer_1.chainstate(); let err = handler.block_replay(&sort_db, chainstate).err().unwrap(); diff --git a/stackslib/src/net/api/tests/getblock_v3.rs b/stackslib/src/net/api/tests/getblock_v3.rs index 272a52421dd..6c1a37edb60 100644 --- a/stackslib/src/net/api/tests/getblock_v3.rs +++ b/stackslib/src/net/api/tests/getblock_v3.rs @@ -123,11 +123,11 @@ fn test_stream_nakamoto_blocks() { .is_err()); let nakamoto_tip = { - let sortdb = peer.sortdb.take().unwrap(); + let sortdb = peer.chain.sortdb.take().unwrap(); let tip = SortitionDB::get_canonical_burn_chain_tip(sortdb.conn()).unwrap(); let ih = sortdb.index_handle(&tip.sortition_id); let nakamoto_tip = ih.get_nakamoto_tip().unwrap().unwrap(); - peer.sortdb = Some(sortdb); + peer.chain.sortdb = Some(sortdb); nakamoto_tip }; diff --git a/stackslib/src/net/api/tests/gettenure.rs b/stackslib/src/net/api/tests/gettenure.rs index a4f26526116..96439a4a9fc 100644 --- a/stackslib/src/net/api/tests/gettenure.rs +++ b/stackslib/src/net/api/tests/gettenure.rs @@ -127,11 +127,11 @@ fn test_stream_nakamoto_tenure() { .is_err()); let nakamoto_tip = { - let sortdb = peer.sortdb.take().unwrap(); + let sortdb = peer.chain.sortdb.take().unwrap(); let tip = SortitionDB::get_canonical_burn_chain_tip(sortdb.conn()).unwrap(); let ih = sortdb.index_handle(&tip.sortition_id); let nakamoto_tip = ih.get_nakamoto_tip().unwrap().unwrap(); - peer.sortdb = Some(sortdb); + peer.chain.sortdb = Some(sortdb); nakamoto_tip }; diff --git a/stackslib/src/net/api/tests/gettransaction.rs b/stackslib/src/net/api/tests/gettransaction.rs index 42ea956c609..1020caef1d5 100644 --- a/stackslib/src/net/api/tests/gettransaction.rs +++ b/stackslib/src/net/api/tests/gettransaction.rs @@ -111,7 +111,7 @@ fn test_try_make_response() { dummy_tip.0[0] = dummy_tip.0[0].wrapping_add(1); let peer = &rpc_test.peer_1; - let sortdb = peer.sortdb.as_ref().unwrap(); + let sortdb = peer.chain.sortdb.as_ref().unwrap(); let tenure_blocks = rpc_test .peer_1 .chainstate_ref() diff --git a/stackslib/src/net/api/tests/mod.rs b/stackslib/src/net/api/tests/mod.rs index 97a60a6b43a..aacc86a0fcd 100644 --- a/stackslib/src/net/api/tests/mod.rs +++ 
b/stackslib/src/net/api/tests/mod.rs @@ -313,8 +313,8 @@ impl<'a> TestRPC<'a> { ) .unwrap(); - let mut peer_1_config = TestPeerConfig::new(&format!("{}-peer1", test_name), 0, 0); - let mut peer_2_config = TestPeerConfig::new(&format!("{}-peer2", test_name), 0, 0); + let mut peer_1_config = TestPeerConfig::new(&format!("{test_name}-peer1"), 0, 0); + let mut peer_2_config = TestPeerConfig::new(&format!("{test_name}-peer2"), 0, 0); peer_1_config.private_key = privk1.clone(); peer_2_config.private_key = privk2.clone(); @@ -349,15 +349,17 @@ impl<'a> TestRPC<'a> { StackerDBConfig::noop(), ); - let peer_1_indexer = BitcoinIndexer::new_unit_test(&peer_1_config.burnchain.working_dir); - let peer_2_indexer = BitcoinIndexer::new_unit_test(&peer_2_config.burnchain.working_dir); + let peer_1_indexer = + BitcoinIndexer::new_unit_test(&peer_1_config.chain_config.burnchain.working_dir); + let peer_2_indexer = + BitcoinIndexer::new_unit_test(&peer_2_config.chain_config.burnchain.working_dir); - peer_1_config.initial_balances = vec![ + peer_1_config.chain_config.initial_balances = vec![ (addr1.to_account_principal(), 1000000000), (addr2.to_account_principal(), 1000000000), ]; - peer_2_config.initial_balances = vec![ + peer_2_config.chain_config.initial_balances = vec![ (addr1.to_account_principal(), 1000000000), (addr2.to_account_principal(), 1000000000), ]; @@ -365,7 +367,7 @@ impl<'a> TestRPC<'a> { peer_1_config.add_neighbor(&peer_2_config.to_neighbor()); peer_2_config.add_neighbor(&peer_1_config.to_neighbor()); - let burnchain = peer_1_config.burnchain.clone(); + let burnchain = peer_1_config.chain_config.burnchain.clone(); with_peer_1_config(&mut peer_1_config); with_peer_2_config(&mut peer_2_config); @@ -482,8 +484,9 @@ impl<'a> TestRPC<'a> { tx.commit().unwrap(); } - let tip = SortitionDB::get_canonical_burn_chain_tip(peer_1.sortdb.as_ref().unwrap().conn()) - .unwrap(); + let tip = + SortitionDB::get_canonical_burn_chain_tip(peer_1.chain.sortdb.as_ref().unwrap().conn()) + .unwrap(); let mut anchor_cost = ExecutionCost::ZERO; let mut anchor_size = 0; @@ -545,7 +548,7 @@ impl<'a> TestRPC<'a> { // build 1-block microblock stream with the contract-call and the unconfirmed contract let microblock = { - let sortdb = peer_1.sortdb.take().unwrap(); + let sortdb = peer_1.chain.sortdb.take().unwrap(); Relayer::setup_unconfirmed_state(peer_1.chainstate(), &sortdb).unwrap(); let mblock = { let sort_iconn = sortdb.index_handle_at_tip(); @@ -568,7 +571,7 @@ impl<'a> TestRPC<'a> { .unwrap(); microblock }; - peer_1.sortdb = Some(sortdb); + peer_1.chain.sortdb = Some(sortdb); mblock }; @@ -597,8 +600,8 @@ impl<'a> TestRPC<'a> { .unwrap(); // process microblock stream to generate unconfirmed state - let sortdb1 = peer_1.sortdb.take().unwrap(); - let sortdb2 = peer_2.sortdb.take().unwrap(); + let sortdb1 = peer_1.chain.sortdb.take().unwrap(); + let sortdb2 = peer_2.chain.sortdb.take().unwrap(); peer_1 .chainstate() .reload_unconfirmed_state(&sortdb1.index_handle_at_tip(), canonical_tip.clone()) @@ -607,8 +610,8 @@ impl<'a> TestRPC<'a> { .chainstate() .reload_unconfirmed_state(&sortdb2.index_handle_at_tip(), canonical_tip.clone()) .unwrap(); - peer_1.sortdb = Some(sortdb1); - peer_2.sortdb = Some(sortdb2); + peer_1.chain.sortdb = Some(sortdb1); + peer_2.chain.sortdb = Some(sortdb2); } let mut mempool_txids = vec![]; @@ -684,23 +687,23 @@ impl<'a> TestRPC<'a> { mempool_tx.commit().unwrap(); peer_2.mempool.replace(mempool); - let peer_1_sortdb = peer_1.sortdb.take().unwrap(); - let mut peer_1_stacks_node = 
peer_1.stacks_node.take().unwrap(); + let peer_1_sortdb = peer_1.chain.sortdb.take().unwrap(); + let mut peer_1_stacks_node = peer_1.chain.stacks_node.take().unwrap(); let _ = peer_1 .network .refresh_burnchain_view(&peer_1_sortdb, &mut peer_1_stacks_node.chainstate, false) .unwrap(); - peer_1.sortdb = Some(peer_1_sortdb); - peer_1.stacks_node = Some(peer_1_stacks_node); + peer_1.chain.sortdb = Some(peer_1_sortdb); + peer_1.chain.stacks_node = Some(peer_1_stacks_node); - let peer_2_sortdb = peer_2.sortdb.take().unwrap(); - let mut peer_2_stacks_node = peer_2.stacks_node.take().unwrap(); + let peer_2_sortdb = peer_2.chain.sortdb.take().unwrap(); + let mut peer_2_stacks_node = peer_2.chain.stacks_node.take().unwrap(); let _ = peer_2 .network .refresh_burnchain_view(&peer_2_sortdb, &mut peer_2_stacks_node.chainstate, false) .unwrap(); - peer_2.sortdb = Some(peer_2_sortdb); - peer_2.stacks_node = Some(peer_2_stacks_node); + peer_2.chain.sortdb = Some(peer_2_sortdb); + peer_2.chain.stacks_node = Some(peer_2_stacks_node); // insert some fake Atlas attachment data let attachment = Attachment { @@ -742,8 +745,9 @@ impl<'a> TestRPC<'a> { .unwrap(); // next tip, coinbase - let tip = SortitionDB::get_canonical_burn_chain_tip(peer_1.sortdb.as_ref().unwrap().conn()) - .unwrap(); + let tip = + SortitionDB::get_canonical_burn_chain_tip(peer_1.chain.sortdb.as_ref().unwrap().conn()) + .unwrap(); let mut tx_coinbase = StacksTransaction::new( TransactionVersion::Testnet, @@ -903,9 +907,10 @@ impl<'a> TestRPC<'a> { }); let mut other_peer = other_peers.pop().unwrap(); - let peer_1_indexer = BitcoinIndexer::new_unit_test(&peer.config.burnchain.working_dir); + let peer_1_indexer = + BitcoinIndexer::new_unit_test(&peer.config.chain_config.burnchain.working_dir); let peer_2_indexer = - BitcoinIndexer::new_unit_test(&other_peer.config.burnchain.working_dir); + BitcoinIndexer::new_unit_test(&other_peer.config.chain_config.burnchain.working_dir); let convo_1 = ConversationHttp::new( format!("127.0.0.1:{}", peer.config.http_port) @@ -931,12 +936,12 @@ impl<'a> TestRPC<'a> { let tip = SortitionDB::get_canonical_burn_chain_tip(peer.sortdb().conn()).unwrap(); let nakamoto_tip = { - let sortdb = peer.sortdb.take().unwrap(); + let sortdb = peer.chain.sortdb.take().unwrap(); let tip = NakamotoChainState::get_canonical_block_header(peer.chainstate().db(), &sortdb) .unwrap() .unwrap(); - peer.sortdb = Some(sortdb); + peer.chain.sortdb = Some(sortdb); tip }; @@ -944,14 +949,14 @@ impl<'a> TestRPC<'a> { let other_tip = SortitionDB::get_canonical_burn_chain_tip(other_peer.sortdb().conn()).unwrap(); let other_nakamoto_tip = { - let sortdb = other_peer.sortdb.take().unwrap(); + let sortdb = other_peer.chain.sortdb.take().unwrap(); let tip = NakamotoChainState::get_canonical_block_header( other_peer.chainstate().db(), &sortdb, ) .unwrap() .unwrap(); - other_peer.sortdb = Some(sortdb); + other_peer.chain.sortdb = Some(sortdb); tip }; @@ -1006,9 +1011,10 @@ impl<'a> TestRPC<'a> { }); let mut other_peer = other_peers.pop().unwrap(); - let peer_1_indexer = BitcoinIndexer::new_unit_test(&peer.config.burnchain.working_dir); + let peer_1_indexer = + BitcoinIndexer::new_unit_test(&peer.config.chain_config.burnchain.working_dir); let peer_2_indexer = - BitcoinIndexer::new_unit_test(&other_peer.config.burnchain.working_dir); + BitcoinIndexer::new_unit_test(&other_peer.config.chain_config.burnchain.working_dir); let convo_1 = ConversationHttp::new( format!("127.0.0.1:{}", peer.config.http_port) @@ -1034,12 +1040,12 @@ impl<'a> TestRPC<'a> { 
let tip = SortitionDB::get_canonical_burn_chain_tip(peer.sortdb().conn()).unwrap(); let nakamoto_tip = { - let sortdb = peer.sortdb.take().unwrap(); + let sortdb = peer.chain.sortdb.take().unwrap(); let tip = NakamotoChainState::get_canonical_block_header(peer.chainstate().db(), &sortdb) .unwrap() .unwrap(); - peer.sortdb = Some(sortdb); + peer.chain.sortdb = Some(sortdb); tip }; @@ -1047,14 +1053,14 @@ impl<'a> TestRPC<'a> { let other_tip = SortitionDB::get_canonical_burn_chain_tip(other_peer.sortdb().conn()).unwrap(); let other_nakamoto_tip = { - let sortdb = other_peer.sortdb.take().unwrap(); + let sortdb = other_peer.chain.sortdb.take().unwrap(); let tip = NakamotoChainState::get_canonical_block_header( other_peer.chainstate().db(), &sortdb, ) .unwrap() .unwrap(); - other_peer.sortdb = Some(sortdb); + other_peer.chain.sortdb = Some(sortdb); tip }; @@ -1127,8 +1133,8 @@ impl<'a> TestRPC<'a> { convo_send_recv(&mut convo_1, &mut convo_2); // hack around the borrow-checker - let peer_1_sortdb = peer_1.sortdb.take().unwrap(); - let mut peer_1_stacks_node = peer_1.stacks_node.take().unwrap(); + let peer_1_sortdb = peer_1.chain.sortdb.take().unwrap(); + let mut peer_1_stacks_node = peer_1.chain.stacks_node.take().unwrap(); if unconfirmed_state { Relayer::setup_unconfirmed_state( @@ -1152,21 +1158,21 @@ impl<'a> TestRPC<'a> { &mut peer_1_mempool, &rpc_args, false, - peer_1.config.txindex, + peer_1.config.chain_config.txindex, ); convo_1.chat(&mut node_state).unwrap(); } - peer_1.sortdb = Some(peer_1_sortdb); - peer_1.stacks_node = Some(peer_1_stacks_node); + peer_1.chain.sortdb = Some(peer_1_sortdb); + peer_1.chain.stacks_node = Some(peer_1_stacks_node); peer_1.mempool = Some(peer_1_mempool); peer_2.mempool = Some(peer_2_mempool); debug!("test_rpc: Peer 2 sends to Peer 1"); // hack around the borrow-checker - let peer_2_sortdb = peer_2.sortdb.take().unwrap(); - let mut peer_2_stacks_node = peer_2.stacks_node.take().unwrap(); + let peer_2_sortdb = peer_2.chain.sortdb.take().unwrap(); + let mut peer_2_stacks_node = peer_2.chain.stacks_node.take().unwrap(); let mut peer_2_mempool = peer_2.mempool.take().unwrap(); let _ = peer_2 @@ -1196,13 +1202,13 @@ impl<'a> TestRPC<'a> { &mut peer_2_mempool, &rpc_args, false, - peer_2.config.txindex, + peer_2.config.chain_config.txindex, ); convo_2.chat(&mut node_state).unwrap(); } - peer_2.sortdb = Some(peer_2_sortdb); - peer_2.stacks_node = Some(peer_2_stacks_node); + peer_2.chain.sortdb = Some(peer_2_sortdb); + peer_2.chain.stacks_node = Some(peer_2_stacks_node); peer_2.mempool = Some(peer_2_mempool); convo_send_recv(&mut convo_2, &mut convo_1); @@ -1212,8 +1218,8 @@ impl<'a> TestRPC<'a> { // hack around the borrow-checker convo_send_recv(&mut convo_1, &mut convo_2); - let peer_1_sortdb = peer_1.sortdb.take().unwrap(); - let mut peer_1_stacks_node = peer_1.stacks_node.take().unwrap(); + let peer_1_sortdb = peer_1.chain.sortdb.take().unwrap(); + let mut peer_1_stacks_node = peer_1.chain.stacks_node.take().unwrap(); let _ = peer_1 .network @@ -1228,15 +1234,15 @@ impl<'a> TestRPC<'a> { .unwrap(); } - peer_1.sortdb = Some(peer_1_sortdb); - peer_1.stacks_node = Some(peer_1_stacks_node); + peer_1.chain.sortdb = Some(peer_1_sortdb); + peer_1.chain.stacks_node = Some(peer_1_stacks_node); let resp_opt = loop { debug!("Peer 1 try get response"); convo_send_recv(&mut convo_1, &mut convo_2); { - let peer_1_sortdb = peer_1.sortdb.take().unwrap(); - let mut peer_1_stacks_node = peer_1.stacks_node.take().unwrap(); + let peer_1_sortdb = 
peer_1.chain.sortdb.take().unwrap(); + let mut peer_1_stacks_node = peer_1.chain.stacks_node.take().unwrap(); let mut peer_1_mempool = peer_1.mempool.take().unwrap(); let rpc_args = peer_1 @@ -1251,13 +1257,13 @@ impl<'a> TestRPC<'a> { &mut peer_1_mempool, &rpc_args, false, - peer_1.config.txindex, + peer_1.config.chain_config.txindex, ); convo_1.chat(&mut node_state).unwrap(); - peer_1.sortdb = Some(peer_1_sortdb); - peer_1.stacks_node = Some(peer_1_stacks_node); + peer_1.chain.sortdb = Some(peer_1_sortdb); + peer_1.chain.stacks_node = Some(peer_1_stacks_node); peer_1.mempool = Some(peer_1_mempool); } diff --git a/stackslib/src/net/api/tests/postblock_proposal.rs b/stackslib/src/net/api/tests/postblock_proposal.rs index efca96863b6..381b4b03780 100644 --- a/stackslib/src/net/api/tests/postblock_proposal.rs +++ b/stackslib/src/net/api/tests/postblock_proposal.rs @@ -234,17 +234,18 @@ fn test_try_make_response() { let mut rpc_test = TestRPC::setup_nakamoto(function_name!(), &test_observer); let mut requests = vec![]; - let tip = - SortitionDB::get_canonical_burn_chain_tip(rpc_test.peer_1.sortdb.as_ref().unwrap().conn()) - .unwrap(); + let tip = SortitionDB::get_canonical_burn_chain_tip( + rpc_test.peer_1.chain.sortdb.as_ref().unwrap().conn(), + ) + .unwrap(); let (stacks_tip_ch, stacks_tip_bhh) = SortitionDB::get_canonical_stacks_chain_tip_hash( - rpc_test.peer_1.sortdb.as_ref().unwrap().conn(), + rpc_test.peer_1.chain.sortdb.as_ref().unwrap().conn(), ) .unwrap(); let stacks_tip = StacksBlockId::new(&stacks_tip_ch, &stacks_tip_bhh); - let miner_privk = &rpc_test.peer_1.miner.nakamoto_miner_key(); + let miner_privk = &rpc_test.peer_1.chain.miner.nakamoto_miner_key(); let mut good_block = { let chainstate = rpc_test.peer_1.chainstate(); @@ -305,7 +306,11 @@ fn test_try_make_response() { // Increment the timestamp by 1 to ensure it is different from the previous block good_block.header.timestamp += 1; - rpc_test.peer_1.miner.sign_nakamoto_block(&mut good_block); + rpc_test + .peer_1 + .chain + .miner + .sign_nakamoto_block(&mut good_block); // post the valid block proposal let proposal = NakamotoBlockProposal { @@ -329,6 +334,7 @@ fn test_try_make_response() { early_time_block.header.timestamp -= 400; rpc_test .peer_1 + .chain .miner .sign_nakamoto_block(&mut early_time_block); @@ -354,6 +360,7 @@ fn test_try_make_response() { late_time_block.header.timestamp += 20000; rpc_test .peer_1 + .chain .miner .sign_nakamoto_block(&mut late_time_block); @@ -377,7 +384,11 @@ fn test_try_make_response() { // Set the timestamp to a value in the past (BEFORE the timeout) let mut stale_block = good_block.clone(); stale_block.header.timestamp -= 10000; - rpc_test.peer_1.miner.sign_nakamoto_block(&mut stale_block); + rpc_test + .peer_1 + .chain + .miner + .sign_nakamoto_block(&mut stale_block); // post the invalid block proposal let proposal = NakamotoBlockProposal { @@ -501,7 +512,7 @@ fn replay_validation_test( let mut requests = vec![]; let (stacks_tip_ch, stacks_tip_bhh) = SortitionDB::get_canonical_stacks_chain_tip_hash( - rpc_test.peer_1.sortdb.as_ref().unwrap().conn(), + rpc_test.peer_1.chain.sortdb.as_ref().unwrap().conn(), ) .unwrap(); let stacks_tip = StacksBlockId::new(&stacks_tip_ch, &stacks_tip_bhh); @@ -560,6 +571,7 @@ fn replay_validation_test( proposed_block.header.timestamp += 1; rpc_test .peer_1 + .chain .miner .sign_nakamoto_block(&mut proposed_block); @@ -630,7 +642,7 @@ fn replay_validation_test( /// Tx replay test with mismatching mineable transactions. 
fn replay_validation_test_transaction_mismatch() { let result = replay_validation_test(|rpc_test| { - let miner_privk = &rpc_test.peer_1.miner.nakamoto_miner_key(); + let miner_privk = &rpc_test.peer_1.chain.miner.nakamoto_miner_key(); // Transaction expected in the replay set (different amount) let tx_for_replay = make_stacks_transfer_tx( miner_privk, @@ -671,7 +683,7 @@ fn replay_validation_test_transaction_mismatch() { /// The block has the one mineable tx. fn replay_validation_test_transaction_unmineable_match() { let result = replay_validation_test(|rpc_test| { - let miner_privk = &rpc_test.peer_1.miner.nakamoto_miner_key(); + let miner_privk = &rpc_test.peer_1.chain.miner.nakamoto_miner_key(); // Transaction expected in the replay set (different amount) let unmineable_tx = make_stacks_transfer_tx( miner_privk, @@ -712,7 +724,7 @@ fn replay_validation_test_transaction_unmineable_match() { fn replay_validation_test_transaction_unmineable_match_2() { let mut replay_set = vec![]; let result = replay_validation_test(|rpc_test| { - let miner_privk = &rpc_test.peer_1.miner.nakamoto_miner_key(); + let miner_privk = &rpc_test.peer_1.chain.miner.nakamoto_miner_key(); // Unmineable tx let unmineable_tx = make_stacks_transfer_tx( miner_privk, @@ -767,7 +779,7 @@ fn replay_validation_test_transaction_unmineable_match_2() { /// The block has [mineable, mineable, tx_b, mineable] fn replay_validation_test_transaction_mineable_mismatch_series() { let result = replay_validation_test(|rpc_test| { - let miner_privk = &rpc_test.peer_1.miner.nakamoto_miner_key(); + let miner_privk = &rpc_test.peer_1.chain.miner.nakamoto_miner_key(); // Mineable tx let mineable_tx_1 = make_stacks_transfer_tx( miner_privk, @@ -845,7 +857,7 @@ fn replay_validation_test_transaction_mineable_mismatch_series() { /// The block has [mineable, tx_a, tx_b] fn replay_validation_test_transaction_mineable_mismatch_series_2() { let result = replay_validation_test(|rpc_test| { - let miner_privk = &rpc_test.peer_1.miner.nakamoto_miner_key(); + let miner_privk = &rpc_test.peer_1.chain.miner.nakamoto_miner_key(); let recipient_sk = StacksPrivateKey::random(); let recipient_addr = to_addr(&recipient_sk); @@ -906,7 +918,7 @@ fn replay_validation_test_transaction_mineable_mismatch_series_2() { /// have cost too much to include. 
fn replay_validation_test_budget_exceeded() { let result = replay_validation_test(|rpc_test| { - let miner_privk = &rpc_test.peer_1.miner.nakamoto_miner_key(); + let miner_privk = &rpc_test.peer_1.chain.miner.nakamoto_miner_key(); let miner_addr = to_addr(miner_privk); let contract_code = make_big_read_count_contract(BLOCK_LIMIT_MAINNET_21, 50); @@ -984,7 +996,7 @@ fn replay_validation_test_budget_exceeded() { fn replay_validation_test_budget_exhausted() { let mut replay_set = vec![]; let result = replay_validation_test(|rpc_test| { - let miner_privk = &rpc_test.peer_1.miner.nakamoto_miner_key(); + let miner_privk = &rpc_test.peer_1.chain.miner.nakamoto_miner_key(); let miner_addr = to_addr(miner_privk); let contract_code = make_big_read_count_contract(BLOCK_LIMIT_MAINNET_21, 50); diff --git a/stackslib/src/net/mod.rs b/stackslib/src/net/mod.rs index 99d0514f88a..b51d1dd3722 100644 --- a/stackslib/src/net/mod.rs +++ b/stackslib/src/net/mod.rs @@ -2235,37 +2235,28 @@ pub mod test { use std::net::*; use std::ops::{Deref, DerefMut}; use std::sync::Mutex; - use std::{fs, io, thread}; + use std::{io, thread}; use clarity::types::sqlite::NO_PARAMS; - use clarity::vm::ast::parser::v1::CONTRACT_MAX_NAME_LENGTH; use clarity::vm::costs::ExecutionCost; - use clarity::vm::database::STXBalance; use clarity::vm::types::*; - use clarity::vm::ContractName; - use rand::{thread_rng, Rng, RngCore}; - use stacks_common::address::*; + use rand::RngCore; use stacks_common::codec::StacksMessageCodec; - use stacks_common::deps_common::bitcoin::network::serialize::BitcoinHash; use stacks_common::types::StacksEpochId; use stacks_common::util::hash::*; use stacks_common::util::secp256k1::*; use stacks_common::util::vrf::*; use {mio, rand}; - use self::nakamoto::test_signers::TestSigners; use super::*; use crate::burnchains::bitcoin::indexer::BitcoinIndexer; - use crate::burnchains::bitcoin::spv::BITCOIN_GENESIS_BLOCK_HASH_REGTEST; - use crate::burnchains::db::{BurnchainDB, BurnchainHeaderReader}; + use crate::burnchains::db::BurnchainDB; use crate::burnchains::tests::*; use crate::burnchains::*; use crate::chainstate::burn::db::sortdb::*; use crate::chainstate::burn::operations::*; use crate::chainstate::burn::*; - use crate::chainstate::coordinator::tests::*; use crate::chainstate::coordinator::{Error as coordinator_error, *}; - use crate::chainstate::nakamoto::tests::node::TestStacker; use crate::chainstate::stacks::address::PoxAddress; use crate::chainstate::stacks::boot::test::get_parent_tip; use crate::chainstate::stacks::boot::*; @@ -2275,8 +2266,8 @@ pub mod test { use crate::chainstate::stacks::tests::chain_histories::mine_smart_contract_block_contract_call_microblock; use crate::chainstate::stacks::tests::*; use crate::chainstate::stacks::{StacksMicroblockHeader, *}; - use crate::chainstate::*; - use crate::core::{EpochList, StacksEpoch, StacksEpochExtension}; + use crate::chainstate::tests::{TestChainstate, TestChainstateConfig}; + use crate::core::{StacksEpoch, StacksEpochExtension}; use crate::cost_estimates::metrics::UnitMetric; use crate::cost_estimates::tests::fee_rate_fuzzer::ConstantFeeEstimator; use crate::cost_estimates::UnitEstimator; @@ -2288,7 +2279,6 @@ pub mod test { use crate::net::relay::*; use crate::net::stackerdb::{StackerDBSync, StackerDBs}; use crate::net::Error as net_error; - use crate::util_lib::boot::{boot_code_test_addr, boot_code_tx_auth}; use crate::util_lib::strings::*; impl StacksMessageCodec for BlockstackOperationType { @@ -2622,14 +2612,12 @@ pub mod test { // describes a 
peer's initial configuration #[derive(Debug, Clone)] pub struct TestPeerConfig { - pub network_id: u32, + pub chain_config: TestChainstateConfig, pub peer_version: u32, - pub current_block: u64, pub private_key: Secp256k1PrivateKey, pub private_key_expire: u64, pub initial_neighbors: Vec, pub asn4_entries: Vec, - pub burnchain: Burnchain, pub connection_opts: ConnectionOptions, pub server_port: u16, pub http_port: u16, @@ -2638,12 +2626,7 @@ pub mod test { pub allowed: i64, pub denied: i64, pub data_url: UrlString, - pub test_name: String, - pub initial_balances: Vec<(PrincipalData, u64)>, - pub initial_lockups: Vec, - pub spending_account: TestMiner, pub setup_code: String, - pub epochs: Option, /// If some(), TestPeer should check the PoX-2 invariants /// on cycle numbers bounded (inclusive) by the supplied u64s pub check_pox_invariants: Option<(u64, u64)>, @@ -2654,41 +2637,18 @@ pub mod test { pub stacker_db_configs: Vec>, /// What services should this peer support? pub services: u16, - /// aggregate public key to use - /// (NOTE: will be used post-Nakamoto) - pub aggregate_public_key: Option>, - pub test_stackers: Option>, - pub test_signers: Option, - pub txindex: bool, } - impl TestPeerConfig { - pub fn default() -> TestPeerConfig { + impl Default for TestPeerConfig { + fn default() -> Self { let conn_opts = ConnectionOptions::default(); - let start_block = 0; - let mut burnchain = Burnchain::default_unittest( - start_block, - &BurnchainHeaderHash::from_hex(BITCOIN_GENESIS_BLOCK_HASH_REGTEST).unwrap(), - ); - - burnchain.pox_constants = PoxConstants::test_20_no_sunset(); - let mut spending_account = TestMinerFactory::new().next_miner( - burnchain.clone(), - 1, - 1, - AddressHashMode::SerializeP2PKH, - ); - spending_account.test_with_tx_fees = false; // manually set transaction fees - - TestPeerConfig { - network_id: 0x80000000, + Self { + chain_config: TestChainstateConfig::default(), peer_version: 0x01020304, - current_block: start_block + (burnchain.consensus_hash_lifetime + 1) as u64, private_key: Secp256k1PrivateKey::random(), - private_key_expire: start_block + conn_opts.private_key_lifetime, + private_key_expire: conn_opts.private_key_lifetime, initial_neighbors: vec![], asn4_entries: vec![], - burnchain, connection_opts: conn_opts, server_port: 32000, http_port: 32001, @@ -2697,25 +2657,18 @@ pub mod test { allowed: 0, denied: 0, data_url: "".into(), - test_name: "".into(), - initial_balances: vec![], - initial_lockups: vec![], - spending_account, setup_code: "".into(), - epochs: None, check_pox_invariants: None, stacker_db_configs: vec![], stacker_dbs: vec![], services: (ServiceFlags::RELAY as u16) | (ServiceFlags::RPC as u16) | (ServiceFlags::STACKERDB as u16), - aggregate_public_key: None, - test_stackers: None, - test_signers: None, - txindex: false, } } + } + impl TestPeerConfig { pub fn from_port(p: u16) -> TestPeerConfig { let mut config = TestPeerConfig { server_port: p, @@ -2730,7 +2683,7 @@ pub mod test { pub fn new(test_name: &str, p2p_port: u16, rpc_port: u16) -> TestPeerConfig { let mut config = TestPeerConfig { - test_name: test_name.into(), + chain_config: TestChainstateConfig::new(test_name), server_port: p2p_port, http_port: rpc_port, ..TestPeerConfig::default() @@ -2749,7 +2702,7 @@ pub mod test { Neighbor { addr: NeighborKey { peer_version: self.peer_version, - network_id: self.network_id, + network_id: self.chain_config.network_id, addrbytes: PeerAddress([ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0xff, 0xff, 127, 0, 0, 1, ]), @@ -2818,28 +2771,9 @@ pub mod test { pub 
struct TestPeer<'a> { pub config: TestPeerConfig, pub network: PeerNetwork, - pub sortdb: Option, - pub miner: TestMiner, - pub stacks_node: Option, pub relayer: Relayer, pub mempool: Option, - pub chainstate_path: String, - pub indexer: Option, - pub coord: ChainsCoordinator< - 'a, - TestEventObserver, - (), - OnChainRewardSetProvider<'a, TestEventObserver>, - (), - (), - BitcoinIndexer, - >, - /// list of malleablized blocks produced when mining. - pub malleablized_blocks: Vec, - pub mine_malleablized_blocks: bool, - /// tenure-start block of tenure to mine on. - /// gets consumed on the call to begin_nakamoto_tenure - pub nakamoto_parent_tenure_opt: Option>, + pub chain: TestChainstate<'a>, /// RPC handler args to use pub rpc_handler_args: Option, } @@ -2849,37 +2783,13 @@ pub mod test { TestPeer::new_with_observer(config, None) } - pub fn test_path(config: &TestPeerConfig) -> String { - let random = thread_rng().gen::(); - let random_bytes = to_hex(&random.to_be_bytes()); - let cleaned_config_test_name = config.test_name.replace("::", "_"); - format!( - "/tmp/stacks-node-tests/units-test-peer/{}-{}", - &cleaned_config_test_name, random_bytes - ) - } - - pub fn stackerdb_path(config: &TestPeerConfig) -> String { - format!("{}/stacker_db.sqlite", &Self::test_path(config)) - } - - pub fn make_test_path(config: &TestPeerConfig) -> String { - let test_path = TestPeer::test_path(config); - if fs::metadata(&test_path).is_ok() { - fs::remove_dir_all(&test_path).unwrap(); - }; - - fs::create_dir_all(&test_path).unwrap(); - test_path - } - fn init_stackerdb_syncs( root_path: &str, peerdb: &PeerDB, stacker_dbs: &mut HashMap, ) -> HashMap)> { - let stackerdb_path = format!("{}/stacker_db.sqlite", root_path); + let stackerdb_path = format!("{root_path}/stacker_db.sqlite"); let mut stacker_db_syncs = HashMap::new(); let local_peer = PeerDB::get_local_peer(peerdb.conn()).unwrap(); for (i, (contract_id, db_config)) in stacker_dbs.iter_mut().enumerate() { @@ -2916,16 +2826,16 @@ pub mod test { ) -> TestPeer<'a> { let mut config = self.config.clone(); config.private_key = privkey; - config.test_name = format!( + config.chain_config.test_name = format!( "{}.neighbor-{}", - &self.config.test_name, + &self.config.chain_config.test_name, Hash160::from_node_public_key(&StacksPublicKey::from_private( &self.config.private_key )) ); config.server_port = 0; config.http_port = 0; - config.test_stackers = self.config.test_stackers.clone(); + config.chain_config.test_stackers = self.config.chain_config.test_stackers.clone(); config.initial_neighbors = vec![self.to_neighbor()]; let peer = TestPeer::new_with_observer(config, observer); @@ -2936,54 +2846,19 @@ pub mod test { mut config: TestPeerConfig, observer: Option<&'a TestEventObserver>, ) -> TestPeer<'a> { - let test_path = TestPeer::make_test_path(&config); - let mut miner_factory = TestMinerFactory::new(); - miner_factory.chain_id = config.network_id; - let mut miner = miner_factory.next_miner( - config.burnchain.clone(), - 1, - 1, - AddressHashMode::SerializeP2PKH, - ); - // manually set fees - miner.test_with_tx_fees = false; - - config.burnchain.working_dir = get_burnchain(&test_path, None).working_dir; - - let epochs = config.epochs.clone().unwrap_or_else(|| { - StacksEpoch::unit_test_pre_2_05(config.burnchain.first_block_height) - }); + let mut chain = + TestChainstate::new_with_observer(config.chain_config.clone(), observer); + // Write back the chain config as TestChainstate::new may have made modifications. 
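The hunk above also turns the inherent `TestPeerConfig::default()` into an implementation of the standard `Default` trait, so the type now satisfies `Default` bounds while the `..TestPeerConfig::default()` struct-update expressions in `from_port` and `new` keep working. A minimal sketch of that pattern, using a hypothetical `PeerConfig` stand-in rather than the real field set:

// Stand-in config; the field names here are illustrative, not the real ones.
#[derive(Clone, Debug, PartialEq)]
struct PeerConfig {
    server_port: u16,
    http_port: u16,
    test_name: String,
}

// Implementing the std trait (instead of an inherent `default()`) lets the
// type satisfy `Default` bounds and compose with derived defaults; the
// struct-update syntax below works either way.
impl Default for PeerConfig {
    fn default() -> Self {
        Self {
            server_port: 32000,
            http_port: 32001,
            test_name: String::new(),
        }
    }
}

impl PeerConfig {
    fn from_port(p: u16) -> Self {
        Self {
            server_port: p,
            http_port: p + 1,
            ..Self::default()
        }
    }
}

fn main() {
    let config = PeerConfig::from_port(4000);
    assert_eq!(config.server_port, 4000);
    assert_eq!(config.http_port, 4001);
    assert_eq!(config.test_name, "");
}

The write-back noted in the comment above (construct the inner chainstate from a cloned config, then copy its adjusted config back) is what keeps the peer-level and chain-level copies of the configuration consistent.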
+ config.chain_config = chain.config.clone(); + let test_path = chain.test_path.clone(); - let mut sortdb = SortitionDB::connect( - &config.burnchain.get_db_path(), - config.burnchain.first_block_height, - &config.burnchain.first_block_hash, - 0, - &epochs, - config.burnchain.pox_constants.clone(), - None, - true, - ) - .unwrap(); - - let first_burnchain_block_height = config.burnchain.first_block_height; - let first_burnchain_block_hash = config.burnchain.first_block_hash.clone(); - - let _burnchain_blocks_db = BurnchainDB::connect( - &config.burnchain.get_burnchaindb_path(), - &config.burnchain, - true, - ) - .unwrap(); - - let chainstate_path = get_chainstate_path_str(&test_path); - let peerdb_path = format!("{}/peers.sqlite", &test_path); + let peerdb_path = format!("{test_path}/peers.sqlite"); let mut peerdb = PeerDB::connect( &peerdb_path, true, - config.network_id, - config.burnchain.network_id, + config.chain_config.network_id, + config.chain_config.burnchain.network_id, None, config.private_key_expire, PeerAddress::from_ipv4(127, 0, 0, 1), @@ -3011,138 +2886,9 @@ pub mod test { tx.commit().unwrap(); } - let atlasdb_path = format!("{}/atlas.sqlite", &test_path); + let atlasdb_path = format!("{test_path}/atlas.sqlite"); let atlasdb = AtlasDB::connect(AtlasConfig::new(false), &atlasdb_path, true).unwrap(); - let agg_pub_key_opt = config.aggregate_public_key.clone(); - - let conf = config.clone(); - let post_flight_callback = move |clarity_tx: &mut ClarityTx| { - let mut receipts = vec![]; - - if let Some(agg_pub_key) = agg_pub_key_opt { - debug!("Setting aggregate public key to {}", &to_hex(&agg_pub_key)); - NakamotoChainState::aggregate_public_key_bootcode(clarity_tx, agg_pub_key); - } else { - debug!("Not setting aggregate public key"); - } - // add test-specific boot code - if !conf.setup_code.is_empty() { - let receipt = clarity_tx.connection().as_transaction(|clarity| { - let boot_code_addr = boot_code_test_addr(); - let boot_code_account = StacksAccount { - principal: boot_code_addr.to_account_principal(), - nonce: 0, - stx_balance: STXBalance::zero(), - }; - - let boot_code_auth = boot_code_tx_auth(boot_code_addr.clone()); - - debug!( - "Instantiate test-specific boot code contract '{}.{}' ({} bytes)...", - &boot_code_addr.to_string(), - &conf.test_name, - conf.setup_code.len() - ); - - let smart_contract = TransactionPayload::SmartContract( - TransactionSmartContract { - name: ContractName::try_from( - conf.test_name - .replace("::", "-") - .chars() - .skip( - conf.test_name - .len() - .saturating_sub(CONTRACT_MAX_NAME_LENGTH), - ) - .collect::() - .trim_start_matches(|c: char| !c.is_alphabetic()) - .to_string(), - ) - .expect("FATAL: invalid boot-code contract name"), - code_body: StacksString::from_str(&conf.setup_code) - .expect("FATAL: invalid boot code body"), - }, - None, - ); - - let boot_code_smart_contract = StacksTransaction::new( - TransactionVersion::Testnet, - boot_code_auth, - smart_contract, - ); - StacksChainState::process_transaction_payload( - clarity, - &boot_code_smart_contract, - &boot_code_account, - None, - ) - .unwrap() - }); - receipts.push(receipt); - } - debug!("Bootup receipts: {:?}", &receipts); - }; - - let mut boot_data = ChainStateBootData::new( - &config.burnchain, - config.initial_balances.clone(), - Some(Box::new(post_flight_callback)), - ); - - if !config.initial_lockups.is_empty() { - let lockups = config.initial_lockups.clone(); - boot_data.get_bulk_initial_lockups = - Some(Box::new(move || Box::new(lockups.into_iter()))); - } - - let 
(chainstate, _) = StacksChainState::open_and_exec( - false, - config.network_id, - &chainstate_path, - Some(&mut boot_data), - None, - ) - .unwrap(); - - let indexer = BitcoinIndexer::new_unit_test(&config.burnchain.working_dir); - let mut coord = ChainsCoordinator::test_new_full( - &config.burnchain, - config.network_id, - &test_path, - OnChainRewardSetProvider(observer), - observer, - indexer, - None, - config.txindex, - ); - coord.handle_new_burnchain_block().unwrap(); - - let mut stacks_node = TestStacksNode::from_chainstate(chainstate); - - { - // pre-populate burnchain, if running on bitcoin - let prev_snapshot = SortitionDB::get_first_block_snapshot(sortdb.conn()).unwrap(); - let mut fork = TestBurnchainFork::new( - prev_snapshot.block_height, - &prev_snapshot.burn_header_hash, - &prev_snapshot.index_root, - 0, - ); - for i in prev_snapshot.block_height..config.current_block { - let burn_block = { - let ic = sortdb.index_conn(); - let mut burn_block = fork.next_block(&ic); - stacks_node.add_key_register(&mut burn_block, &mut miner); - burn_block - }; - fork.append_block(burn_block); - - fork.mine_pending_blocks_pox(&mut sortdb, &config.burnchain, &mut coord); - } - } - let local_addr = SocketAddr::new(IpAddr::V4(Ipv4Addr::new(0, 0, 0, 0)), config.server_port); let http_local_addr = @@ -3167,11 +2913,16 @@ pub mod test { let local_peer = PeerDB::get_local_peer(peerdb.conn()).unwrap(); let burnchain_view = { - let chaintip = SortitionDB::get_canonical_burn_chain_tip(sortdb.conn()).unwrap(); - SortitionDB::get_burnchain_view(&sortdb.index_conn(), &config.burnchain, &chaintip) - .unwrap() + let chaintip = + SortitionDB::get_canonical_burn_chain_tip(chain.sortdb().conn()).unwrap(); + SortitionDB::get_burnchain_view( + &chain.sortdb().index_conn(), + &config.chain_config.burnchain, + &chaintip, + ) + .unwrap() }; - let stackerdb_path = format!("{}/stacker_db.sqlite", &test_path); + let stackerdb_path = format!("{test_path}/stacker_db.sqlite"); let mut stacker_dbs_conn = StackerDBs::connect(&stackerdb_path, true).unwrap(); let relayer_stacker_dbs = StackerDBs::connect(&stackerdb_path, true).unwrap(); let p2p_stacker_dbs = StackerDBs::connect(&stackerdb_path, true).unwrap(); @@ -3187,10 +2938,11 @@ pub mod test { .unwrap_or(StackerDBConfig::noop()), ); } + let mut stacks_node = chain.stacks_node.take().unwrap(); let mut stackerdb_configs = stacker_dbs_conn .create_or_reconfigure_stackerdbs( &mut stacks_node.chainstate, - &sortdb, + chain.sortdb_ref(), old_stackerdb_configs, &config.connection_opts, ) @@ -3201,7 +2953,15 @@ pub mod test { let stackerdb_contracts: Vec<_> = stacker_db_syncs.keys().cloned().collect(); - let burnchain_db = config.burnchain.open_burnchain_db(false).unwrap(); + let burnchain_db = config + .chain_config + .burnchain + .open_burnchain_db(false) + .unwrap(); + + let epochs = config.chain_config.epochs.clone().unwrap_or_else(|| { + StacksEpoch::unit_test_pre_2_05(config.chain_config.burnchain.first_block_height) + }); let mut peer_network = PeerNetwork::new( peerdb, @@ -3210,7 +2970,7 @@ pub mod test { burnchain_db, local_peer, config.peer_version, - config.burnchain.clone(), + config.chain_config.burnchain.clone(), burnchain_view, config.connection_opts.clone(), stacker_db_syncs, @@ -3220,26 +2980,30 @@ pub mod test { peer_network.bind(&local_addr, &http_local_addr).unwrap(); let relayer = Relayer::from_p2p(&mut peer_network, relayer_stacker_dbs); - let mempool = MemPoolDB::open_test(false, config.network_id, &chainstate_path).unwrap(); - let indexer = 
BitcoinIndexer::new_unit_test(&config.burnchain.working_dir); + let mempool = MemPoolDB::open_test( + false, + config.chain_config.network_id, + &chain.chainstate_path, + ) + .unwrap(); // extract bound ports (which may be different from what's in the config file, if e.g. // they were 0) let p2p_port = peer_network.bound_neighbor_key().port; let http_port = peer_network.http.as_ref().unwrap().http_server_addr.port(); - debug!("Bound to (p2p={}, http={})", p2p_port, http_port); + debug!("Bound to (p2p={p2p_port}, http={http_port})"); config.server_port = p2p_port; config.http_port = http_port; config.data_url = - UrlString::try_from(format!("http://127.0.0.1:{}", http_port).as_str()).unwrap(); + UrlString::try_from(format!("http://127.0.0.1:{http_port}").as_str()).unwrap(); peer_network .peerdb .update_local_peer( - config.network_id, - config.burnchain.network_id, + config.chain_config.network_id, + config.chain_config.burnchain.network_id, config.data_url.clone(), p2p_port, &stackerdb_contracts, @@ -3248,38 +3012,30 @@ pub mod test { let local_peer = PeerDB::get_local_peer(peer_network.peerdb.conn()).unwrap(); debug!( - "{:?}: initial neighbors: {:?}", - &local_peer, &config.initial_neighbors + "{local_peer:?}: initial neighbors: {:?}", + &config.initial_neighbors ); peer_network.local_peer = local_peer; - + chain.stacks_node = Some(stacks_node); TestPeer { config, + chain, network: peer_network, - sortdb: Some(sortdb), - miner, - stacks_node: Some(stacks_node), relayer, mempool: Some(mempool), - chainstate_path, - coord, - indexer: Some(indexer), - malleablized_blocks: vec![], - mine_malleablized_blocks: true, - nakamoto_parent_tenure_opt: None, rpc_handler_args: None, } } pub fn connect_initial(&mut self) -> Result<(), net_error> { let local_peer = PeerDB::get_local_peer(self.network.peerdb.conn()).unwrap(); - let chain_view = match self.sortdb { + let chain_view = match self.chain.sortdb { Some(ref mut sortdb) => { let chaintip = SortitionDB::get_canonical_burn_chain_tip(sortdb.conn()).unwrap(); SortitionDB::get_burnchain_view( &sortdb.index_conn(), - &self.config.burnchain, + &self.config.chain_config.burnchain, &chaintip, ) .unwrap() @@ -3311,7 +3067,7 @@ pub mod test { if bootstrap { PeerDB::set_initial_peer( &tx, - self.config.network_id, + self.config.chain_config.network_id, &n.addr.addrbytes, n.addr.port, ) @@ -3343,8 +3099,8 @@ pub mod test { } pub fn step(&mut self) -> Result { - let sortdb = self.sortdb.take().unwrap(); - let stacks_node = self.stacks_node.take().unwrap(); + let sortdb = self.chain.sortdb.take().unwrap(); + let stacks_node = self.chain.stacks_node.take().unwrap(); let burn_tip_height = SortitionDB::get_canonical_burn_chain_tip(sortdb.conn()) .unwrap() .block_height; @@ -3356,12 +3112,12 @@ pub mod test { .map(|hdr| hdr.anchored_header.height()) .unwrap_or(0); let ibd = TestPeer::infer_initial_burnchain_block_download( - &self.config.burnchain, + &self.config.chain_config.burnchain, stacks_tip_height, burn_tip_height, ); - self.sortdb = Some(sortdb); - self.stacks_node = Some(stacks_node); + self.chain.sortdb = Some(sortdb); + self.chain.stacks_node = Some(stacks_node); self.step_with_ibd(ibd) } @@ -3375,10 +3131,10 @@ pub mod test { ibd: bool, dns_client: Option<&mut DNSClient>, ) -> Result { - let sortdb = self.sortdb.take().unwrap(); - let mut stacks_node = self.stacks_node.take().unwrap(); + let sortdb = self.chain.sortdb.take().unwrap(); + let mut stacks_node = self.chain.stacks_node.take().unwrap(); let mut mempool = self.mempool.take().unwrap(); - let 
indexer = self.indexer.take().unwrap(); + let indexer = self.chain.indexer.take().unwrap(); let rpc_handler_args = self .rpc_handler_args .as_ref() @@ -3401,7 +3157,7 @@ pub mod test { ibd, 100, &rpc_handler_args, - self.config.txindex, + self.config.chain_config.txindex, ); if self.network.get_current_epoch().epoch_id >= StacksEpochId::Epoch30 { @@ -3428,10 +3184,10 @@ pub mod test { assert_eq!(self.network.epoch2_state_machine_passes, epoch2_passes + 1); } - self.sortdb = Some(sortdb); - self.stacks_node = Some(stacks_node); + self.chain.sortdb = Some(sortdb); + self.chain.stacks_node = Some(stacks_node); self.mempool = Some(mempool); - self.indexer = Some(indexer); + self.chain.indexer = Some(indexer); ret } @@ -3441,10 +3197,10 @@ pub mod test { dns_client: Option<&mut DNSClient>, ) -> Result<(NetworkResult, ProcessedNetReceipts), net_error> { let net_result = self.step_with_ibd_and_dns(ibd, dns_client)?; - let mut sortdb = self.sortdb.take().unwrap(); - let mut stacks_node = self.stacks_node.take().unwrap(); + let mut sortdb = self.chain.sortdb.take().unwrap(); + let mut stacks_node = self.chain.stacks_node.take().unwrap(); let mut mempool = self.mempool.take().unwrap(); - let indexer = self.indexer.take().unwrap(); + let indexer = self.chain.indexer.take().unwrap(); let receipts_res = self.relayer.process_network_result( self.network.get_local_peer(), @@ -3458,23 +3214,24 @@ pub mod test { None, ); - self.sortdb = Some(sortdb); - self.stacks_node = Some(stacks_node); + self.chain.sortdb = Some(sortdb); + self.chain.stacks_node = Some(stacks_node); self.mempool = Some(mempool); - self.indexer = Some(indexer); + self.chain.indexer = Some(indexer); - self.coord.handle_new_burnchain_block().unwrap(); - self.coord.handle_new_stacks_block().unwrap(); - self.coord.handle_new_nakamoto_stacks_block().unwrap(); + self.chain.coord.handle_new_burnchain_block().unwrap(); + self.chain.coord.handle_new_stacks_block().unwrap(); + self.chain.coord.handle_new_nakamoto_stacks_block().unwrap(); receipts_res.map(|receipts| (net_result, receipts)) } pub fn step_dns(&mut self, dns_client: &mut DNSClient) -> Result { - let sortdb = self.sortdb.take().unwrap(); - let mut stacks_node = self.stacks_node.take().unwrap(); + let sortdb = self.chain.sortdb.take().unwrap(); + let mut stacks_node = self.chain.stacks_node.take().unwrap(); let mut mempool = self.mempool.take().unwrap(); - let indexer = BitcoinIndexer::new_unit_test(&self.config.burnchain.working_dir); + let indexer = + BitcoinIndexer::new_unit_test(&self.config.chain_config.burnchain.working_dir); let burn_tip_height = SortitionDB::get_canonical_burn_chain_tip(sortdb.conn()) .unwrap() @@ -3487,11 +3244,12 @@ pub mod test { .map(|hdr| hdr.anchored_header.height()) .unwrap_or(0); let ibd = TestPeer::infer_initial_burnchain_block_download( - &self.config.burnchain, + &self.config.chain_config.burnchain, stacks_tip_height, burn_tip_height, ); - let indexer = BitcoinIndexer::new_unit_test(&self.config.burnchain.working_dir); + let indexer = + BitcoinIndexer::new_unit_test(&self.config.chain_config.burnchain.working_dir); let rpc_handler_args = self .rpc_handler_args .as_ref() @@ -3513,7 +3271,7 @@ pub mod test { ibd, 100, &rpc_handler_args, - self.config.txindex, + self.config.chain_config.txindex, ); if self.network.get_current_epoch().epoch_id >= StacksEpochId::Epoch30 { @@ -3540,29 +3298,30 @@ pub mod test { assert_eq!(self.network.epoch2_state_machine_passes, epoch2_passes + 1); } - self.sortdb = Some(sortdb); - self.stacks_node = Some(stacks_node); + 
self.chain.sortdb = Some(sortdb); + self.chain.stacks_node = Some(stacks_node); self.mempool = Some(mempool); ret } pub fn refresh_burnchain_view(&mut self) { - let sortdb = self.sortdb.take().unwrap(); - let mut stacks_node = self.stacks_node.take().unwrap(); - let indexer = BitcoinIndexer::new_unit_test(&self.config.burnchain.working_dir); + let sortdb = self.chain.sortdb.take().unwrap(); + let mut stacks_node = self.chain.stacks_node.take().unwrap(); + let indexer = + BitcoinIndexer::new_unit_test(&self.config.chain_config.burnchain.working_dir); self.network .refresh_burnchain_view(&sortdb, &mut stacks_node.chainstate, false) .unwrap(); - self.sortdb = Some(sortdb); - self.stacks_node = Some(stacks_node); + self.chain.sortdb = Some(sortdb); + self.chain.stacks_node = Some(stacks_node); } pub fn refresh_reward_cycles(&mut self) { - let sortdb = self.sortdb.take().unwrap(); - let mut stacks_node = self.stacks_node.take().unwrap(); + let sortdb = self.chain.sortdb.take().unwrap(); + let mut stacks_node = self.chain.stacks_node.take().unwrap(); let tip = SortitionDB::get_canonical_burn_chain_tip(sortdb.conn()).unwrap(); let tip_block_id = self.network.stacks_tip.block_id(); @@ -3578,8 +3337,8 @@ pub mod test { ) .unwrap(); - self.sortdb = Some(sortdb); - self.stacks_node = Some(stacks_node); + self.chain.sortdb = Some(sortdb); + self.chain.stacks_node = Some(stacks_node); } pub fn for_each_convo_p2p(&mut self, mut f: F) -> Vec> @@ -3595,57 +3354,39 @@ pub mod test { } pub fn get_burnchain_db(&self, readwrite: bool) -> BurnchainDB { - let burnchain_db = - BurnchainDB::open(&self.config.burnchain.get_burnchaindb_path(), readwrite) - .unwrap(); - burnchain_db + self.chain.get_burnchain_db(readwrite) } pub fn get_sortition_at_height(&self, height: u64) -> Option { - let sortdb = self.sortdb.as_ref().unwrap(); - let tip = SortitionDB::get_canonical_burn_chain_tip(sortdb.conn()).unwrap(); - let sort_handle = sortdb.index_handle(&tip.sortition_id); - sort_handle.get_block_snapshot_by_height(height).unwrap() + self.chain.get_sortition_at_height(height) } pub fn get_burnchain_block_ops( &self, burn_block_hash: &BurnchainHeaderHash, ) -> Vec { - let burnchain_db = - BurnchainDB::open(&self.config.burnchain.get_burnchaindb_path(), false).unwrap(); - burnchain_db - .get_burnchain_block_ops(burn_block_hash) - .unwrap() + self.chain.get_burnchain_block_ops(burn_block_hash) } pub fn get_burnchain_block_ops_at_height( &self, height: u64, ) -> Option> { - let sortdb = self.sortdb.as_ref().unwrap(); - let tip = SortitionDB::get_canonical_burn_chain_tip(sortdb.conn()).unwrap(); - let sort_handle = sortdb.index_handle(&tip.sortition_id); - let Some(sn) = sort_handle.get_block_snapshot_by_height(height).unwrap() else { - return None; - }; - Some(self.get_burnchain_block_ops(&sn.burn_header_hash)) + self.chain.get_burnchain_block_ops_at_height(height) } pub fn next_burnchain_block( &mut self, blockstack_ops: Vec, ) -> (u64, BurnchainHeaderHash, ConsensusHash) { - let x = self.inner_next_burnchain_block(blockstack_ops, true, true, true, false); - (x.0, x.1, x.2) + self.chain.next_burnchain_block(blockstack_ops) } pub fn next_burnchain_block_diverge( &mut self, blockstack_ops: Vec, ) -> (u64, BurnchainHeaderHash, ConsensusHash) { - let x = self.inner_next_burnchain_block(blockstack_ops, true, true, true, true); - (x.0, x.1, x.2) + self.chain.next_burnchain_block_diverge(blockstack_ops) } pub fn next_burnchain_block_and_missing_pox_anchor( @@ -3657,23 +3398,23 @@ pub mod test { ConsensusHash, Option, ) { - 
self.inner_next_burnchain_block(blockstack_ops, true, true, true, false) + self.chain + .next_burnchain_block_and_missing_pox_anchor(blockstack_ops) } pub fn next_burnchain_block_raw( &mut self, blockstack_ops: Vec, ) -> (u64, BurnchainHeaderHash, ConsensusHash) { - let x = self.inner_next_burnchain_block(blockstack_ops, false, false, true, false); - (x.0, x.1, x.2) + self.chain.next_burnchain_block_raw(blockstack_ops) } pub fn next_burnchain_block_raw_sortition_only( &mut self, blockstack_ops: Vec, ) -> (u64, BurnchainHeaderHash, ConsensusHash) { - let x = self.inner_next_burnchain_block(blockstack_ops, false, false, false, false); - (x.0, x.1, x.2) + self.chain + .next_burnchain_block_raw_sortition_only(blockstack_ops) } pub fn next_burnchain_block_raw_and_missing_pox_anchor( @@ -3685,210 +3426,15 @@ pub mod test { ConsensusHash, Option, ) { - self.inner_next_burnchain_block(blockstack_ops, false, false, true, false) - } - - pub fn set_ops_consensus_hash( - blockstack_ops: &mut Vec, - ch: &ConsensusHash, - ) { - for op in blockstack_ops.iter_mut() { - if let BlockstackOperationType::LeaderKeyRegister(ref mut data) = op { - data.consensus_hash = (*ch).clone(); - } - } - } - - pub fn set_ops_burn_header_hash( - blockstack_ops: &mut Vec, - bhh: &BurnchainHeaderHash, - ) { - for op in blockstack_ops.iter_mut() { - op.set_burn_header_hash(bhh.clone()); - } - } - - pub fn make_next_burnchain_block( - burnchain: &Burnchain, - tip_block_height: u64, - tip_block_hash: &BurnchainHeaderHash, - num_ops: u64, - ops_determine_block_header: bool, - ) -> BurnchainBlockHeader { - test_debug!( - "make_next_burnchain_block: tip_block_height={} tip_block_hash={} num_ops={}", - tip_block_height, - tip_block_hash, - num_ops - ); - let indexer = BitcoinIndexer::new_unit_test(&burnchain.working_dir); - let parent_hdr = indexer - .read_burnchain_header(tip_block_height) - .unwrap() - .unwrap(); - - test_debug!("parent hdr ({}): {:?}", &tip_block_height, &parent_hdr); - assert_eq!(&parent_hdr.block_hash, tip_block_hash); - - let now = BURNCHAIN_TEST_BLOCK_TIME; - let block_header_hash = BurnchainHeaderHash::from_bitcoin_hash( - &BitcoinIndexer::mock_bitcoin_header( - &parent_hdr.block_hash, - (now as u32) - + if ops_determine_block_header { - num_ops as u32 - } else { - 0 - }, - ) - .bitcoin_hash(), - ); - test_debug!( - "Block header hash at {} is {}", - tip_block_height + 1, - &block_header_hash - ); - - let block_header = BurnchainBlockHeader { - block_height: tip_block_height + 1, - block_hash: block_header_hash.clone(), - parent_block_hash: parent_hdr.block_hash.clone(), - num_txs: num_ops, - timestamp: now, - }; - - block_header - } - - pub fn add_burnchain_block( - burnchain: &Burnchain, - block_header: &BurnchainBlockHeader, - blockstack_ops: Vec, - ) { - let mut burnchain_db = - BurnchainDB::open(&burnchain.get_burnchaindb_path(), true).unwrap(); - - let mut indexer = BitcoinIndexer::new_unit_test(&burnchain.working_dir); - - test_debug!( - "Store header and block ops for {}-{} ({})", - &block_header.block_hash, - &block_header.parent_block_hash, - block_header.block_height - ); - indexer.raw_store_header(block_header.clone()).unwrap(); - burnchain_db - .raw_store_burnchain_block( - burnchain, - &indexer, - block_header.clone(), - blockstack_ops, - ) - .unwrap(); - } - - /// Generate and commit the next burnchain block with the given block operations. - /// * if `set_consensus_hash` is true, then each op's consensus_hash field will be set to - /// that of the resulting block snapshot. 
- /// * if `set_burn_hash` is true, then each op's burnchain header hash field will be set to - /// that of the resulting block snapshot. - /// - /// Returns ( - /// burnchain tip block height, - /// burnchain tip block hash, - /// burnchain tip consensus hash, - /// Option - /// ) - fn inner_next_burnchain_block( - &mut self, - mut blockstack_ops: Vec, - set_consensus_hash: bool, - set_burn_hash: bool, - update_burnchain: bool, - ops_determine_block_header: bool, - ) -> ( - u64, - BurnchainHeaderHash, - ConsensusHash, - Option, - ) { - let sortdb = self.sortdb.take().unwrap(); - let (block_height, block_hash, epoch_id) = { - let tip = SortitionDB::get_canonical_burn_chain_tip(sortdb.conn()).unwrap(); - let epoch_id = SortitionDB::get_stacks_epoch(sortdb.conn(), tip.block_height + 1) - .unwrap() - .unwrap() - .epoch_id; - - if set_consensus_hash { - TestPeer::set_ops_consensus_hash(&mut blockstack_ops, &tip.consensus_hash); - } - - let block_header = Self::make_next_burnchain_block( - &self.config.burnchain, - tip.block_height, - &tip.burn_header_hash, - blockstack_ops.len() as u64, - ops_determine_block_header, - ); - - if set_burn_hash { - TestPeer::set_ops_burn_header_hash( - &mut blockstack_ops, - &block_header.block_hash, - ); - } - - if update_burnchain { - Self::add_burnchain_block( - &self.config.burnchain, - &block_header, - blockstack_ops.clone(), - ); - } - (block_header.block_height, block_header.block_hash, epoch_id) - }; - - let missing_pox_anchor_block_hash_opt = if epoch_id < StacksEpochId::Epoch30 { - self.coord - .handle_new_burnchain_block() - .unwrap() - .into_missing_block_hash() - } else if self.coord.handle_new_nakamoto_burnchain_block().unwrap() { - None - } else { - Some(BlockHeaderHash([0x00; 32])) - }; - - let pox_id = { - let ic = sortdb.index_conn(); - let tip_sort_id = SortitionDB::get_canonical_sortition_tip(sortdb.conn()).unwrap(); - let sortdb_reader = SortitionHandleConn::open_reader(&ic, &tip_sort_id).unwrap(); - sortdb_reader.get_pox_id().unwrap() - }; - - test_debug!( - "\n\n{:?}: after burn block {:?}, tip PoX ID is {:?}\n\n", - &self.to_neighbor().addr, - &block_hash, - &pox_id - ); - - let tip = SortitionDB::get_canonical_burn_chain_tip(sortdb.conn()).unwrap(); - self.sortdb = Some(sortdb); - ( - block_height, - block_hash, - tip.consensus_hash, - missing_pox_anchor_block_hash_opt, - ) + self.chain + .next_burnchain_block_raw_and_missing_pox_anchor(blockstack_ops) } /// Pre-process an epoch 2.x Stacks block. /// Validate it and store it to staging. 
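`preprocess_stacks_block` below, like `step`, `step_with_ibd_and_dns`, and the other methods in this stretch, moves the `Option`-held databases out of the struct before using them and puts them back afterwards. A reduced sketch of that take()/restore idiom, with `SortDb` and `Peer` as stand-ins for the real SortitionDB and TestPeer/TestChainstate types:

// Sketch only: the point is the Option take()/restore idiom, not the real API.
struct SortDb {
    tip_height: u64,
}

struct Peer {
    sortdb: Option<SortDb>,
    steps: u64,
}

impl Peer {
    fn bump_steps(&mut self) {
        self.steps += 1;
    }

    fn step(&mut self) -> u64 {
        // Move the DB out of `self` so that `self` can still be used mutably
        // (e.g. by helper methods) while the DB is in use.
        let mut sortdb = self.sortdb.take().expect("sortdb not restored");
        sortdb.tip_height += 1;
        self.bump_steps(); // fine: `sortdb` is now a local, not a borrow of `self`
        let height = sortdb.tip_height;
        // Restore it before returning, or the next take() will panic.
        self.sortdb = Some(sortdb);
        height
    }
}

fn main() {
    let mut peer = Peer { sortdb: Some(SortDb { tip_height: 0 }), steps: 0 };
    assert_eq!(peer.step(), 1);
    assert_eq!(peer.step(), 2);
    assert_eq!(peer.steps, 2);
}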
pub fn preprocess_stacks_block(&mut self, block: &StacksBlock) -> Result { - let sortdb = self.sortdb.take().unwrap(); - let mut node = self.stacks_node.take().unwrap(); + let sortdb = self.chain.sortdb.take().unwrap(); + let mut node = self.chain.stacks_node.take().unwrap(); let res = { let sn = { let ic = sortdb.index_conn(); @@ -3942,11 +3488,11 @@ pub mod test { &block.block_hash(), &pox_id ); - self.coord.handle_new_stacks_block().unwrap(); + self.chain.coord.handle_new_stacks_block().unwrap(); } - self.sortdb = Some(sortdb); - self.stacks_node = Some(node); + self.chain.sortdb = Some(sortdb); + self.chain.stacks_node = Some(node); res } @@ -3956,50 +3502,7 @@ pub mod test { &mut self, microblocks: &[StacksMicroblock], ) -> Result { - assert!(!microblocks.is_empty()); - let sortdb = self.sortdb.take().unwrap(); - let mut node = self.stacks_node.take().unwrap(); - let res = { - let anchor_block_hash = microblocks[0].header.prev_block.clone(); - let sn = { - let ic = sortdb.index_conn(); - let tip = SortitionDB::get_canonical_burn_chain_tip(&ic).unwrap(); - let sn_opt = SortitionDB::get_block_snapshot_for_winning_stacks_block( - &ic, - &tip.sortition_id, - &anchor_block_hash, - ) - .unwrap(); - if sn_opt.is_none() { - return Err(format!( - "No such block in canonical burn fork: {}", - &anchor_block_hash - )); - } - sn_opt.unwrap() - }; - - let mut res = Ok(true); - for mblock in microblocks.iter() { - res = node - .chainstate - .preprocess_streamed_microblock( - &sn.consensus_hash, - &anchor_block_hash, - mblock, - ) - .map_err(|e| format!("Failed to preprocess microblock: {:?}", &e)); - - if res.is_err() { - break; - } - } - res - }; - - self.sortdb = Some(sortdb); - self.stacks_node = Some(node); - res + self.chain.preprocess_stacks_microblocks(microblocks) } /// Store the given epoch 2.x Stacks block and microblock to staging, and then try and @@ -4009,64 +3512,7 @@ pub mod test { block: &StacksBlock, microblocks: &[StacksMicroblock], ) { - let sortdb = self.sortdb.take().unwrap(); - let mut node = self.stacks_node.take().unwrap(); - { - let ic = sortdb.index_conn(); - let tip = SortitionDB::get_canonical_burn_chain_tip(&ic).unwrap(); - node.chainstate - .preprocess_stacks_epoch(&ic, &tip, block, microblocks) - .unwrap(); - } - self.coord.handle_new_stacks_block().unwrap(); - - let pox_id = { - let ic = sortdb.index_conn(); - let tip_sort_id = SortitionDB::get_canonical_sortition_tip(sortdb.conn()).unwrap(); - let sortdb_reader = SortitionHandleConn::open_reader(&ic, &tip_sort_id).unwrap(); - sortdb_reader.get_pox_id().unwrap() - }; - test_debug!( - "\n\n{:?}: after stacks block {:?}, tip PoX ID is {:?}\n\n", - &self.to_neighbor().addr, - &block.block_hash(), - &pox_id - ); - - self.sortdb = Some(sortdb); - self.stacks_node = Some(node); - } - - /// Store the given epoch 2.x Stacks block and microblock to the given node's staging, - /// using the given sortition DB as well, and then try and process them. 
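The methods in this stretch (`preprocess_stacks_microblocks`, `process_stacks_epoch_at_tip`, `process_stacks_epoch`, and the `inner_*` helpers removed below) all collapse into one-line forwards to the new `chain` field. A simplified sketch of that delegation shape, with illustrative `Chain` and `Peer` stand-ins rather than the stackslib types:

// Sketch of the delegation shape: the outer test harness keeps its own
// (networking) state and forwards chain operations to an inner object.
struct Chain {
    blocks: Vec<String>,
}

impl Chain {
    fn process_block(&mut self, block: &str) {
        self.blocks.push(block.to_string());
    }

    fn tip(&self) -> Option<&String> {
        self.blocks.last()
    }
}

struct Peer {
    chain: Chain,
    peers_contacted: usize,
}

impl Peer {
    // The former "fat" method body now lives on Chain; Peer just forwards.
    fn process_block(&mut self, block: &str) {
        self.chain.process_block(block)
    }

    fn tip(&self) -> Option<&String> {
        self.chain.tip()
    }
}

fn main() {
    let mut peer = Peer { chain: Chain { blocks: vec![] }, peers_contacted: 0 };
    peer.process_block("block-1");
    peer.peers_contacted += 1;
    assert_eq!(peer.tip().map(String::as_str), Some("block-1"));
}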
- fn inner_process_stacks_epoch_at_tip( - &mut self, - sortdb: &SortitionDB, - node: &mut TestStacksNode, - block: &StacksBlock, - microblocks: &[StacksMicroblock], - ) -> Result<(), coordinator_error> { - { - let ic = sortdb.index_conn(); - let tip = SortitionDB::get_canonical_burn_chain_tip(&ic)?; - node.chainstate - .preprocess_stacks_epoch(&ic, &tip, block, microblocks)?; - } - self.coord.handle_new_stacks_block()?; - - let pox_id = { - let ic = sortdb.index_conn(); - let tip_sort_id = SortitionDB::get_canonical_sortition_tip(sortdb.conn())?; - let sortdb_reader = SortitionHandleConn::open_reader(&ic, &tip_sort_id)?; - sortdb_reader.get_pox_id()?; - }; - test_debug!( - "\n\n{:?}: after stacks block {:?}, tip PoX ID is {:?}\n\n", - &self.to_neighbor().addr, - &block.block_hash(), - &pox_id - ); - Ok(()) + self.chain.process_stacks_epoch_at_tip(block, microblocks); } /// Store the given epoch 2.x Stacks block and microblock to the given node's staging, @@ -4076,13 +3522,8 @@ pub mod test { block: &StacksBlock, microblocks: &[StacksMicroblock], ) -> Result<(), coordinator_error> { - let sortdb = self.sortdb.take().unwrap(); - let mut node = self.stacks_node.take().unwrap(); - let res = - self.inner_process_stacks_epoch_at_tip(&sortdb, &mut node, block, microblocks); - self.sortdb = Some(sortdb); - self.stacks_node = Some(node); - res + self.chain + .process_stacks_epoch_at_tip_checked(block, microblocks) } /// Accept a new Stacks block and microblocks via the relayer, and then try to process @@ -4093,56 +3534,16 @@ pub mod test { consensus_hash: &ConsensusHash, microblocks: &[StacksMicroblock], ) { - let sortdb = self.sortdb.take().unwrap(); - let mut node = self.stacks_node.take().unwrap(); - { - let ic = sortdb.index_conn(); - Relayer::process_new_anchored_block( - &ic, - &mut node.chainstate, - consensus_hash, - block, - 0, - ) - .unwrap(); - - let block_hash = block.block_hash(); - for mblock in microblocks.iter() { - node.chainstate - .preprocess_streamed_microblock(consensus_hash, &block_hash, mblock) - .unwrap(); - } - } - self.coord.handle_new_stacks_block().unwrap(); - - let pox_id = { - let ic = sortdb.index_conn(); - let tip_sort_id = SortitionDB::get_canonical_sortition_tip(sortdb.conn()).unwrap(); - let sortdb_reader = SortitionHandleConn::open_reader(&ic, &tip_sort_id).unwrap(); - sortdb_reader.get_pox_id().unwrap() - }; - - test_debug!( - "\n\n{:?}: after stacks block {:?}, tip PoX ID is {:?}\n\n", - &self.to_neighbor().addr, - &block.block_hash(), - &pox_id - ); - - self.sortdb = Some(sortdb); - self.stacks_node = Some(node); + self.chain + .process_stacks_epoch(block, consensus_hash, microblocks); } pub fn add_empty_burnchain_block(&mut self) -> (u64, BurnchainHeaderHash, ConsensusHash) { - self.next_burnchain_block(vec![]) + self.chain.add_empty_burnchain_block() } pub fn mine_empty_tenure(&mut self) -> (u64, BurnchainHeaderHash, ConsensusHash) { - let (burn_ops, ..) 
= self.begin_nakamoto_tenure(TenureChangeCause::BlockFound); - let result = self.next_burnchain_block(burn_ops); - // remove the last block commit so that the testpeer doesn't try to build off of this tenure - self.miner.block_commits.pop(); - result + self.chain.mine_empty_tenure() } pub fn mempool(&mut self) -> &mut MemPoolDB { @@ -4150,33 +3551,33 @@ pub mod test { } pub fn chainstate(&mut self) -> &mut StacksChainState { - &mut self.stacks_node.as_mut().unwrap().chainstate + self.chain.chainstate() } pub fn chainstate_ref(&self) -> &StacksChainState { - &self.stacks_node.as_ref().unwrap().chainstate + self.chain.chainstate_ref() } pub fn sortdb(&mut self) -> &mut SortitionDB { - self.sortdb.as_mut().unwrap() + self.chain.sortdb() } pub fn sortdb_ref(&mut self) -> &SortitionDB { - self.sortdb.as_ref().unwrap() + self.chain.sortdb_ref() } pub fn with_dbs(&mut self, f: F) -> R where F: FnOnce(&mut TestPeer, &mut SortitionDB, &mut TestStacksNode, &mut MemPoolDB) -> R, { - let mut sortdb = self.sortdb.take().unwrap(); - let mut stacks_node = self.stacks_node.take().unwrap(); + let mut sortdb = self.chain.sortdb.take().unwrap(); + let mut stacks_node = self.chain.stacks_node.take().unwrap(); let mut mempool = self.mempool.take().unwrap(); let res = f(self, &mut sortdb, &mut stacks_node, &mut mempool); - self.stacks_node = Some(stacks_node); - self.sortdb = Some(sortdb); + self.chain.stacks_node = Some(stacks_node); + self.chain.sortdb = Some(sortdb); self.mempool = Some(mempool); res } @@ -4190,8 +3591,8 @@ pub mod test { &mut MemPoolDB, ) -> Result, { - let mut sortdb = self.sortdb.take().unwrap(); - let mut stacks_node = self.stacks_node.take().unwrap(); + let mut sortdb = self.chain.sortdb.take().unwrap(); + let mut stacks_node = self.chain.stacks_node.take().unwrap(); let mut mempool = self.mempool.take().unwrap(); let res = f( @@ -4201,8 +3602,8 @@ pub mod test { &mut mempool, ); - self.stacks_node = Some(stacks_node); - self.sortdb = Some(sortdb); + self.chain.stacks_node = Some(stacks_node); + self.chain.sortdb = Some(sortdb); self.mempool = Some(mempool); res } @@ -4216,16 +3617,16 @@ pub mod test { &mut TestStacksNode, ) -> Result, { - let mut stacks_node = self.stacks_node.take().unwrap(); - let mut sortdb = self.sortdb.take().unwrap(); + let mut stacks_node = self.chain.stacks_node.take().unwrap(); + let mut sortdb = self.chain.sortdb.take().unwrap(); let res = f( &mut sortdb, - &mut self.miner, - &mut self.config.spending_account, + &mut self.chain.miner, + &mut self.config.chain_config.spending_account, &mut stacks_node, ); - self.sortdb = Some(sortdb); - self.stacks_node = Some(stacks_node); + self.chain.sortdb = Some(sortdb); + self.chain.stacks_node = Some(stacks_node); res } @@ -4239,8 +3640,8 @@ pub mod test { &mut MemPoolDB, ) -> Result, { - let mut sortdb = self.sortdb.take().unwrap(); - let mut stacks_node = self.stacks_node.take().unwrap(); + let mut sortdb = self.chain.sortdb.take().unwrap(); + let mut stacks_node = self.chain.stacks_node.take().unwrap(); let mut mempool = self.mempool.take().unwrap(); let res = f( @@ -4251,8 +3652,8 @@ pub mod test { &mut mempool, ); - self.stacks_node = Some(stacks_node); - self.sortdb = Some(sortdb); + self.chain.stacks_node = Some(stacks_node); + self.chain.sortdb = Some(sortdb); self.mempool = Some(mempool); res } @@ -4266,14 +3667,14 @@ pub mod test { &mut MemPoolDB, ) -> Result, { - let mut sortdb = self.sortdb.take().unwrap(); - let mut stacks_node = self.stacks_node.take().unwrap(); + let mut sortdb = 
self.chain.sortdb.take().unwrap(); + let mut stacks_node = self.chain.stacks_node.take().unwrap(); let mut mempool = self.mempool.take().unwrap(); let res = f(self, &mut sortdb, &mut stacks_node.chainstate, &mut mempool); - self.stacks_node = Some(stacks_node); - self.sortdb = Some(sortdb); + self.chain.stacks_node = Some(stacks_node); + self.chain.sortdb = Some(sortdb); self.mempool = Some(mempool); res } @@ -4285,13 +3686,14 @@ pub mod test { txs: &[StacksTransaction], coinbase_nonce: &mut usize, ) -> StacksBlockId { - let microblock_privkey = self.miner.next_microblock_privkey(); + let microblock_privkey = self.chain.miner.next_microblock_privkey(); let microblock_pubkeyhash = Hash160::from_node_public_key(&StacksPublicKey::from_private(µblock_privkey)); - let tip = - SortitionDB::get_canonical_burn_chain_tip(self.sortdb.as_ref().unwrap().conn()) - .unwrap(); - let burnchain = self.config.burnchain.clone(); + let tip = SortitionDB::get_canonical_burn_chain_tip( + self.chain.sortdb.as_ref().unwrap().conn(), + ) + .unwrap(); + let burnchain = self.config.chain_config.burnchain.clone(); let (burn_ops, stacks_block, microblocks) = self.make_tenure( |ref mut miner, @@ -4374,13 +3776,13 @@ pub mod test { Option<&StacksMicroblockHeader>, ) -> (StacksBlock, Vec), { - let mut sortdb = self.sortdb.take().unwrap(); + let mut sortdb = self.chain.sortdb.take().unwrap(); let tip = SortitionDB::get_canonical_burn_chain_tip(sortdb.conn()).unwrap(); let mut burn_block = TestBurnchainBlock::new(&tip, 0); - let mut stacks_node = self.stacks_node.take().unwrap(); + let mut stacks_node = self.chain.stacks_node.take().unwrap(); - let parent_block_opt = stacks_node.get_last_anchored_block(&self.miner); + let parent_block_opt = stacks_node.get_last_anchored_block(&self.chain.miner); let parent_sortition_opt = parent_block_opt.as_ref().and_then(|parent_block| { let ic = sortdb.index_conn(); SortitionDB::get_block_snapshot_for_winning_stacks_block( @@ -4391,15 +3793,19 @@ pub mod test { .unwrap() }); - let parent_microblock_header_opt = - get_last_microblock_header(&stacks_node, &self.miner, parent_block_opt.as_ref()); - let last_key = stacks_node.get_last_key(&self.miner); + let parent_microblock_header_opt = get_last_microblock_header( + &stacks_node, + &self.chain.miner, + parent_block_opt.as_ref(), + ); + let last_key = stacks_node.get_last_key(&self.chain.miner); - let network_id = self.config.network_id; - let chainstate_path = self.chainstate_path.clone(); + let network_id = self.config.chain_config.network_id; + let chainstate_path = self.chain.chainstate_path.clone(); let burn_block_height = burn_block.block_height; let proof = self + .chain .miner .make_proof( &last_key.public_key, @@ -4408,7 +3814,7 @@ pub mod test { .unwrap_or_else(|| panic!("FATAL: no private key for {:?}", last_key.public_key)); let (stacks_block, microblocks) = tenure_builder( - &mut self.miner, + &mut self.chain.miner, &mut sortdb, &mut stacks_node.chainstate, &proof, @@ -4419,7 +3825,7 @@ pub mod test { let mut block_commit_op = stacks_node.make_tenure_commitment( &sortdb, &mut burn_block, - &mut self.miner, + &mut self.chain.miner, &stacks_block, microblocks.clone(), 1000, @@ -4433,14 +3839,15 @@ pub mod test { block_commit_op.parent_vtxindex = 0; } - let leader_key_op = stacks_node.add_key_register(&mut burn_block, &mut self.miner); + let leader_key_op = + stacks_node.add_key_register(&mut burn_block, &mut self.chain.miner); // patch in reward set info match get_next_recipients( &tip, &mut stacks_node.chainstate, &mut sortdb, 
- &self.config.burnchain, + &self.config.chain_config.burnchain, &OnChainRewardSetProvider::new(), ) { Ok(recipients) => { @@ -4459,6 +3866,7 @@ pub mod test { None => { if self .config + .chain_config .burnchain .is_in_prepare_phase(burn_block.block_height) { @@ -4483,8 +3891,8 @@ pub mod test { } }; - self.stacks_node = Some(stacks_node); - self.sortdb = Some(sortdb); + self.chain.stacks_node = Some(stacks_node); + self.chain.sortdb = Some(sortdb); ( vec![ BlockstackOperationType::LeaderKeyRegister(leader_key_op), @@ -4503,26 +3911,29 @@ pub mod test { StacksBlock, Vec, ) { - let sortdb = self.sortdb.take().unwrap(); + let sortdb = self.chain.sortdb.take().unwrap(); let mut burn_block = { let sn = SortitionDB::get_canonical_burn_chain_tip(sortdb.conn()).unwrap(); TestBurnchainBlock::new(&sn, 0) }; - let mut stacks_node = self.stacks_node.take().unwrap(); + let mut stacks_node = self.chain.stacks_node.take().unwrap(); - let parent_block_opt = stacks_node.get_last_anchored_block(&self.miner); - let parent_microblock_header_opt = - get_last_microblock_header(&stacks_node, &self.miner, parent_block_opt.as_ref()); - let last_key = stacks_node.get_last_key(&self.miner); + let parent_block_opt = stacks_node.get_last_anchored_block(&self.chain.miner); + let parent_microblock_header_opt = get_last_microblock_header( + &stacks_node, + &self.chain.miner, + parent_block_opt.as_ref(), + ); + let last_key = stacks_node.get_last_key(&self.chain.miner); - let network_id = self.config.network_id; - let chainstate_path = self.chainstate_path.clone(); + let network_id = self.config.chain_config.network_id; + let chainstate_path = self.chain.chainstate_path.clone(); let burn_block_height = burn_block.block_height; let (stacks_block, microblocks, block_commit_op) = stacks_node.mine_stacks_block( &sortdb, - &mut self.miner, + &mut self.chain.miner, &mut burn_block, &last_key, parent_block_opt.as_ref(), @@ -4554,10 +3965,11 @@ pub mod test { }, ); - let leader_key_op = stacks_node.add_key_register(&mut burn_block, &mut self.miner); + let leader_key_op = + stacks_node.add_key_register(&mut burn_block, &mut self.chain.miner); - self.stacks_node = Some(stacks_node); - self.sortdb = Some(sortdb); + self.chain.stacks_node = Some(stacks_node); + self.chain.sortdb = Some(sortdb); ( vec![ BlockstackOperationType::LeaderKeyRegister(leader_key_op), @@ -4586,17 +3998,17 @@ pub mod test { } pub fn get_burnchain_view(&mut self) -> Result { - let sortdb = self.sortdb.take().unwrap(); + let sortdb = self.chain.sortdb.take().unwrap(); let view_res = { let chaintip = SortitionDB::get_canonical_burn_chain_tip(&sortdb.index_conn()).unwrap(); SortitionDB::get_burnchain_view( &sortdb.index_conn(), - &self.config.burnchain, + &self.config.chain_config.burnchain, &chaintip, ) }; - self.sortdb = Some(sortdb); + self.chain.sortdb = Some(sortdb); view_res } @@ -4617,9 +4029,9 @@ pub mod test { pub fn make_client_convo(&self) -> ConversationP2P { ConversationP2P::new( - self.config.network_id, + self.config.chain_config.network_id, self.config.peer_version, - &self.config.burnchain, + &self.config.chain_config.burnchain, &SocketAddr::new( IpAddr::V4(Ipv4Addr::new(127, 0, 0, 1)), self.config.server_port, @@ -4628,6 +4040,7 @@ pub mod test { false, 0, self.config + .chain_config .epochs .clone() .unwrap_or(StacksEpoch::unit_test_3_0(0)), @@ -4636,7 +4049,7 @@ pub mod test { pub fn make_client_local_peer(&self, privk: StacksPrivateKey) -> LocalPeer { LocalPeer::new( - self.config.network_id, + self.config.chain_config.network_id, 
self.network.local_peer.parent_network_id, PeerAddress::from_socketaddr(&SocketAddr::new( IpAddr::V4(Ipv4Addr::new(127, 0, 0, 1)), @@ -4653,7 +4066,11 @@ pub mod test { pub fn get_burn_block_height(&self) -> u64 { SortitionDB::get_canonical_burn_chain_tip( - self.sortdb.as_ref().expect("Failed to get sortdb").conn(), + self.chain + .sortdb + .as_ref() + .expect("Failed to get sortdb") + .conn(), ) .expect("Failed to get canonical burn chain tip") .block_height @@ -4662,6 +4079,7 @@ pub mod test { pub fn get_reward_cycle(&self) -> u64 { let block_height = self.get_burn_block_height(); self.config + .chain_config .burnchain .block_height_to_reward_cycle(block_height) .unwrap_or_else(|| { @@ -4671,8 +4089,8 @@ pub mod test { /// Verify that the sortition DB migration into Nakamoto worked correctly. pub fn check_nakamoto_migration(&mut self) { - let mut sortdb = self.sortdb.take().unwrap(); - let mut node = self.stacks_node.take().unwrap(); + let mut sortdb = self.chain.sortdb.take().unwrap(); + let mut node = self.chain.stacks_node.take().unwrap(); let chainstate = &mut node.chainstate; let tip = SortitionDB::get_canonical_burn_chain_tip(sortdb.conn()).unwrap(); @@ -4776,8 +4194,8 @@ pub mod test { tx.commit().unwrap(); let migrator = SortitionDBMigrator::new( - self.config.burnchain.clone(), - &self.chainstate_path, + self.config.chain_config.burnchain.clone(), + &self.chain.chainstate_path, None, ) .unwrap(); @@ -4826,8 +4244,8 @@ pub mod test { assert_eq!(restored_chain_tips, all_chain_tips); assert_eq!(restored_reward_sets, all_preprocessed_reward_sets); - self.sortdb = Some(sortdb); - self.stacks_node = Some(node); + self.chain.sortdb = Some(sortdb); + self.chain.stacks_node = Some(node); } /// Verify that all malleablized blocks are duly processed @@ -4836,7 +4254,7 @@ pub mod test { all_blocks: Vec, expected_siblings: usize, ) { - if !self.mine_malleablized_blocks { + if !self.chain.mine_malleablized_blocks { return; } for block in all_blocks.iter() { @@ -4866,12 +4284,12 @@ pub mod test { /// Set the nakamoto tenure to mine on pub fn mine_nakamoto_on(&mut self, parent_tenure: Vec) { - self.nakamoto_parent_tenure_opt = Some(parent_tenure); + self.chain.nakamoto_parent_tenure_opt = Some(parent_tenure); } /// Clear the tenure to mine on. 
This causes the miner to build on the canonical tip pub fn mine_nakamoto_on_canonical_tip(&mut self) { - self.nakamoto_parent_tenure_opt = None; + self.chain.nakamoto_parent_tenure_opt = None; } /// Get an account off of a tip @@ -4880,8 +4298,13 @@ pub mod test { tip: &StacksBlockId, account: &PrincipalData, ) -> StacksAccount { - let sortdb = self.sortdb.take().expect("FATAL: sortdb not restored"); + let sortdb = self + .chain + .sortdb + .take() + .expect("FATAL: sortdb not restored"); let mut node = self + .chain .stacks_node .take() .expect("FATAL: chainstate not restored"); @@ -4896,8 +4319,8 @@ pub mod test { .unwrap() .unwrap(); - self.sortdb = Some(sortdb); - self.stacks_node = Some(node); + self.chain.sortdb = Some(sortdb); + self.chain.stacks_node = Some(node); acct } } diff --git a/stackslib/src/net/server.rs b/stackslib/src/net/server.rs index 83941a33cc4..df43b11c44d 100644 --- a/stackslib/src/net/server.rs +++ b/stackslib/src/net/server.rs @@ -681,8 +681,8 @@ mod test { let view = peer.get_burnchain_view().unwrap(); let (http_sx, http_rx) = sync_channel(1); - let network_id = peer.config.network_id; - let chainstate_path = peer.chainstate_path.clone(); + let network_id = peer.config.chain_config.network_id; + let chainstate_path = peer.chain.chainstate_path.clone(); let (num_events_sx, num_events_rx) = sync_channel(1); let http_thread = thread::spawn(move || { diff --git a/stackslib/src/net/tests/convergence.rs b/stackslib/src/net/tests/convergence.rs index 853de5da3ce..8440954eff6 100644 --- a/stackslib/src/net/tests/convergence.rs +++ b/stackslib/src/net/tests/convergence.rs @@ -87,7 +87,7 @@ fn setup_peer_config( conf.connection_opts.disable_block_download = true; let j = i as u32; - conf.burnchain.peer_version = PEER_VERSION_TESTNET | (j << 16) | (j << 8) | j; // different non-major versions for each peer + conf.chain_config.burnchain.peer_version = PEER_VERSION_TESTNET | (j << 16) | (j << 8) | j; // different non-major versions for each peer // even-number peers support stacker DBs. 
// odd-number peers do not diff --git a/stackslib/src/net/tests/download/epoch2x.rs b/stackslib/src/net/tests/download/epoch2x.rs index c07f9c73c75..a5f93b562eb 100644 --- a/stackslib/src/net/tests/download/epoch2x.rs +++ b/stackslib/src/net/tests/download/epoch2x.rs @@ -29,6 +29,7 @@ use crate::chainstate::stacks::db::blocks::MINIMUM_TX_FEE_RATE_PER_BYTE; use crate::chainstate::stacks::miner::*; use crate::chainstate::stacks::tests::*; use crate::chainstate::stacks::*; +use crate::chainstate::tests::TestChainstate; use crate::net::download::BlockDownloader; use crate::net::test::*; use crate::net::*; @@ -77,16 +78,21 @@ fn test_get_block_availability() { peer_1_config.add_neighbor(&peer_2_config.to_neighbor()); peer_2_config.add_neighbor(&peer_1_config.to_neighbor()); - let reward_cycle_length = peer_1_config.burnchain.pox_constants.reward_cycle_length as u64; + let reward_cycle_length = peer_1_config + .chain_config + .burnchain + .pox_constants + .reward_cycle_length as u64; let mut peer_1 = TestPeer::new(peer_1_config); let mut peer_2 = TestPeer::new(peer_2_config); let num_blocks = 10; let first_stacks_block_height = { - let sn = - SortitionDB::get_canonical_burn_chain_tip(peer_1.sortdb.as_ref().unwrap().conn()) - .unwrap(); + let sn = SortitionDB::get_canonical_burn_chain_tip( + peer_1.chain.sortdb.as_ref().unwrap().conn(), + ) + .unwrap(); sn.block_height }; @@ -99,23 +105,25 @@ fn test_get_block_availability() { peer_2.next_burnchain_block(burn_ops.clone()); peer_2.process_stacks_epoch_at_tip(&stacks_block, µblocks); - TestPeer::set_ops_burn_header_hash(&mut burn_ops, &burn_header_hash); + TestChainstate::set_ops_burn_header_hash(&mut burn_ops, &burn_header_hash); // We do not have the anchor block for peer 1, therefore it cannot advance its tip. 
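The call a few lines up now goes through `TestChainstate::set_ops_burn_header_hash` instead of the removed `TestPeer` associated function; the helper simply stamps the new burn block's header hash onto every queued operation (the `if i < 6` guard that follows then limits which of those burn blocks peer 1 receives). A self-contained sketch of the helper's shape, with a plain enum and a byte-array hash standing in for BlockstackOperationType and BurnchainHeaderHash:

// Simplified stand-ins: the real code mutates BlockstackOperationType values
// with a BurnchainHeaderHash; here a small enum and [u8; 32] play those roles.
#[derive(Clone, Copy, PartialEq, Debug)]
struct HeaderHash([u8; 32]);

#[derive(Debug)]
enum Op {
    LeaderKeyRegister { burn_header_hash: HeaderHash },
    LeaderBlockCommit { burn_header_hash: HeaderHash },
}

impl Op {
    fn set_burn_header_hash(&mut self, hash: HeaderHash) {
        match self {
            Op::LeaderKeyRegister { burn_header_hash }
            | Op::LeaderBlockCommit { burn_header_hash } => *burn_header_hash = hash,
        }
    }
}

// Associated helper on the chainstate harness: stamp one hash onto every op.
fn set_ops_burn_header_hash(ops: &mut [Op], hash: &HeaderHash) {
    for op in ops.iter_mut() {
        op.set_burn_header_hash(*hash);
    }
}

fn main() {
    let zero = HeaderHash([0u8; 32]);
    let tip = HeaderHash([0xaa; 32]);
    let mut ops = vec![
        Op::LeaderKeyRegister { burn_header_hash: zero },
        Op::LeaderBlockCommit { burn_header_hash: zero },
    ];
    set_ops_burn_header_hash(&mut ops, &tip);
    assert!(ops.iter().all(|op| match op {
        Op::LeaderKeyRegister { burn_header_hash }
        | Op::LeaderBlockCommit { burn_header_hash } => *burn_header_hash == tip,
    }));
}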
if i < 6 { peer_1.next_burnchain_block_raw(burn_ops); } - let sn = - SortitionDB::get_canonical_burn_chain_tip(peer_2.sortdb.as_ref().unwrap().conn()) - .unwrap(); + let sn = SortitionDB::get_canonical_burn_chain_tip( + peer_2.chain.sortdb.as_ref().unwrap().conn(), + ) + .unwrap(); block_data.push((sn.consensus_hash.clone(), stacks_block, microblocks)); } let num_burn_blocks = { - let sn = - SortitionDB::get_canonical_burn_chain_tip(peer_1.sortdb.as_ref().unwrap().conn()) - .unwrap(); - sn.block_height - peer_1.config.burnchain.first_block_height + let sn = SortitionDB::get_canonical_burn_chain_tip( + peer_1.chain.sortdb.as_ref().unwrap().conn(), + ) + .unwrap(); + sn.block_height - peer_1.config.chain_config.burnchain.first_block_height }; let mut round = 0; @@ -208,7 +216,7 @@ fn test_get_block_availability() { fn get_blocks_inventory(peer: &TestPeer, start_height: u64, end_height: u64) -> BlocksInvData { let block_hashes = { let num_headers = end_height - start_height; - let ic = peer.sortdb.as_ref().unwrap().index_conn(); + let ic = peer.chain.sortdb.as_ref().unwrap().index_conn(); let tip = SortitionDB::get_canonical_burn_chain_tip(&ic).unwrap(); let ancestor = SortitionDB::get_ancestor_snapshot(&ic, end_height, &tip.sortition_id) .unwrap() @@ -262,7 +270,7 @@ where port_base + ((2 * i) as u16), port_base + ((2 * i + 1) as u16), ); - peer_config.burnchain.first_block_height = first_sortition_height; + peer_config.chain_config.burnchain.first_block_height = first_sortition_height; peer_configs.push(peer_config); } @@ -273,9 +281,10 @@ where let mut num_blocks = 10; let first_stacks_block_height = { - let sn = - SortitionDB::get_canonical_burn_chain_tip(peers[0].sortdb.as_ref().unwrap().conn()) - .unwrap(); + let sn = SortitionDB::get_canonical_burn_chain_tip( + peers[0].chain.sortdb.as_ref().unwrap().conn(), + ) + .unwrap(); sn.block_height }; @@ -283,9 +292,10 @@ where num_blocks = block_data.len(); let num_burn_blocks = { - let sn = - SortitionDB::get_canonical_burn_chain_tip(peers[0].sortdb.as_ref().unwrap().conn()) - .unwrap(); + let sn = SortitionDB::get_canonical_burn_chain_tip( + peers[0].chain.sortdb.as_ref().unwrap().conn(), + ) + .unwrap(); sn.block_height }; @@ -339,7 +349,7 @@ where peer.with_peer_state(|peer, sortdb, chainstate, mempool| { for i in 0..(result.blocks.len() + result.confirmed_microblocks.len() + 1) { - peer.coord.handle_new_stacks_block().unwrap(); + peer.chain.coord.handle_new_stacks_block().unwrap(); let pox_id = { let ic = sortdb.index_conn(); @@ -363,9 +373,10 @@ where assert!(check_breakage(peer)); let peer_num_burn_blocks = { - let sn = - SortitionDB::get_canonical_burn_chain_tip(peer.sortdb.as_ref().unwrap().conn()) - .unwrap(); + let sn = SortitionDB::get_canonical_burn_chain_tip( + peer.chain.sortdb.as_ref().unwrap().conn(), + ) + .unwrap(); sn.block_height }; @@ -527,12 +538,12 @@ pub fn test_get_blocks_and_microblocks_2_peers_download_plain() { peers[1].next_burnchain_block(burn_ops.clone()); peers[1].process_stacks_epoch_at_tip(&stacks_block, µblocks); - TestPeer::set_ops_burn_header_hash(&mut burn_ops, &burn_header_hash); + TestChainstate::set_ops_burn_header_hash(&mut burn_ops, &burn_header_hash); peers[0].next_burnchain_block_raw(burn_ops); let sn = SortitionDB::get_canonical_burn_chain_tip( - peers[1].sortdb.as_ref().unwrap().conn(), + peers[1].chain.sortdb.as_ref().unwrap().conn(), ) .unwrap(); block_data.push(( @@ -667,18 +678,24 @@ pub fn test_get_blocks_and_microblocks_2_peers_download_plain_100_blocks() { // peer[1] has a big initial 
balance let initial_balances = vec![( - PrincipalData::from(peer_configs[1].spending_account.origin_address().unwrap()), + PrincipalData::from( + peer_configs[1] + .chain_config + .spending_account + .origin_address() + .unwrap(), + ), 1_000_000_000_000_000, )]; - peer_configs[0].initial_balances = initial_balances.clone(); - peer_configs[1].initial_balances = initial_balances; + peer_configs[0].chain_config.initial_balances = initial_balances.clone(); + peer_configs[1].chain_config.initial_balances = initial_balances; }, |num_blocks, ref mut peers| { // build up block data to replicate let mut block_data = vec![]; - let spending_account = &mut peers[1].config.spending_account.clone(); - let burnchain = peers[1].config.burnchain.clone(); + let spending_account = &mut peers[1].config.chain_config.spending_account.clone(); + let burnchain = peers[1].config.chain_config.burnchain.clone(); // function to make a tenure in which a the peer's miner stacks its STX let mut make_stacking_tenure = |miner: &mut TestMiner, @@ -804,12 +821,12 @@ pub fn test_get_blocks_and_microblocks_2_peers_download_plain_100_blocks() { peers[1].next_burnchain_block(burn_ops.clone()); peers[1].process_stacks_epoch_at_tip(&stacks_block, µblocks); - TestPeer::set_ops_burn_header_hash(&mut burn_ops, &burn_header_hash); + TestChainstate::set_ops_burn_header_hash(&mut burn_ops, &burn_header_hash); peers[0].next_burnchain_block_raw(burn_ops); let sn = SortitionDB::get_canonical_burn_chain_tip( - peers[1].sortdb.as_ref().unwrap().conn(), + peers[1].chain.sortdb.as_ref().unwrap().conn(), ) .unwrap(); block_data.push(( @@ -890,14 +907,14 @@ pub fn test_get_blocks_and_microblocks_5_peers_star() { peers[0].next_burnchain_block(burn_ops.clone()); peers[0].process_stacks_epoch_at_tip(&stacks_block, µblocks); - TestPeer::set_ops_burn_header_hash(&mut burn_ops, &burn_header_hash); + TestChainstate::set_ops_burn_header_hash(&mut burn_ops, &burn_header_hash); for i in 1..peers.len() { peers[i].next_burnchain_block_raw(burn_ops.clone()); } let sn = SortitionDB::get_canonical_burn_chain_tip( - peers[0].sortdb.as_ref().unwrap().conn(), + peers[0].chain.sortdb.as_ref().unwrap().conn(), ) .unwrap(); block_data.push(( @@ -961,14 +978,14 @@ pub fn test_get_blocks_and_microblocks_5_peers_line() { peers[0].next_burnchain_block(burn_ops.clone()); peers[0].process_stacks_epoch_at_tip(&stacks_block, µblocks); - TestPeer::set_ops_burn_header_hash(&mut burn_ops, &burn_header_hash); + TestChainstate::set_ops_burn_header_hash(&mut burn_ops, &burn_header_hash); for i in 1..peers.len() { peers[i].next_burnchain_block_raw(burn_ops.clone()); } let sn = SortitionDB::get_canonical_burn_chain_tip( - peers[0].sortdb.as_ref().unwrap().conn(), + peers[0].chain.sortdb.as_ref().unwrap().conn(), ) .unwrap(); block_data.push(( @@ -1040,14 +1057,14 @@ pub fn test_get_blocks_and_microblocks_overwhelmed_connections() { peers[0].next_burnchain_block(burn_ops.clone()); peers[0].process_stacks_epoch_at_tip(&stacks_block, µblocks); - TestPeer::set_ops_burn_header_hash(&mut burn_ops, &burn_header_hash); + TestChainstate::set_ops_burn_header_hash(&mut burn_ops, &burn_header_hash); for i in 1..peers.len() { peers[i].next_burnchain_block_raw(burn_ops.clone()); } let sn = SortitionDB::get_canonical_burn_chain_tip( - peers[0].sortdb.as_ref().unwrap().conn(), + peers[0].chain.sortdb.as_ref().unwrap().conn(), ) .unwrap(); block_data.push(( @@ -1116,14 +1133,14 @@ pub fn test_get_blocks_and_microblocks_overwhelmed_sockets() { peers[0].next_burnchain_block(burn_ops.clone()); 
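`make_stacking_tenure` above, and `TestPeer::make_tenure` generally, follow a callback style: the test supplies a closure that is handed mutable references to the miner and the databases and returns the block to mine. A reduced sketch of that inversion-of-control shape, assuming hypothetical `Miner`, `SortDb`, `Block`, and `Harness` stand-ins rather than the real types:

// The harness owns the resources and lends them to a test-supplied closure
// that decides what to mine for this tenure.
struct Miner { nonce: u64 }
struct SortDb { burn_height: u64 }
struct Block { height: u64, txs: usize }

struct Harness {
    miner: Miner,
    sortdb: Option<SortDb>,
    chain: Vec<Block>,
}

impl Harness {
    fn make_tenure<F>(&mut self, mut tenure_builder: F) -> Block
    where
        F: FnMut(&mut Miner, &mut SortDb, u64) -> Block,
    {
        let mut sortdb = self.sortdb.take().expect("sortdb not restored");
        sortdb.burn_height += 1;
        let parent_height = self.chain.last().map(|b| b.height).unwrap_or(0);
        // The closure gets mutable access to the miner and DB for this tenure only.
        let block = tenure_builder(&mut self.miner, &mut sortdb, parent_height);
        self.sortdb = Some(sortdb);
        self.chain.push(Block { height: block.height, txs: block.txs });
        block
    }
}

fn main() {
    let mut harness = Harness {
        miner: Miner { nonce: 0 },
        sortdb: Some(SortDb { burn_height: 100 }),
        chain: vec![],
    };
    let block = harness.make_tenure(|miner, _sortdb, parent_height| {
        miner.nonce += 1;
        Block { height: parent_height + 1, txs: 1 }
    });
    assert_eq!(block.height, 1);
    assert_eq!(harness.miner.nonce, 1);
}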
peers[0].process_stacks_epoch_at_tip(&stacks_block, µblocks); - TestPeer::set_ops_burn_header_hash(&mut burn_ops, &burn_header_hash); + TestChainstate::set_ops_burn_header_hash(&mut burn_ops, &burn_header_hash); for i in 1..peers.len() { peers[i].next_burnchain_block_raw(burn_ops.clone()); } let sn = SortitionDB::get_canonical_burn_chain_tip( - peers[0].sortdb.as_ref().unwrap().conn(), + peers[0].chain.sortdb.as_ref().unwrap().conn(), ) .unwrap(); block_data.push(( @@ -1203,12 +1220,12 @@ pub fn test_get_blocks_and_microblocks_ban_url() { peers[1].next_burnchain_block(burn_ops.clone()); peers[1].process_stacks_epoch_at_tip(&stacks_block, µblocks); - TestPeer::set_ops_burn_header_hash(&mut burn_ops, &burn_header_hash); + TestChainstate::set_ops_burn_header_hash(&mut burn_ops, &burn_header_hash); peers[0].next_burnchain_block_raw(burn_ops); let sn = SortitionDB::get_canonical_burn_chain_tip( - peers[1].sortdb.as_ref().unwrap().conn(), + peers[1].chain.sortdb.as_ref().unwrap().conn(), ) .unwrap(); block_data.push(( @@ -1303,7 +1320,7 @@ pub fn test_get_blocks_and_microblocks_2_peers_download_multiple_microblock_desc let signed_tx = sign_standard_singlesig_tx( next_microblock_payload, - &peers[1].miner.privks[0], + &peers[1].chain.miner.privks[0], last_nonce + 1, 0, ); @@ -1317,7 +1334,15 @@ pub fn test_get_blocks_and_microblocks_2_peers_download_multiple_microblock_desc mblock.header.sequence += 1; mblock .header - .sign(peers[1].miner.microblock_privks.last().as_ref().unwrap()) + .sign( + peers[1] + .chain + .miner + .microblock_privks + .last() + .as_ref() + .unwrap(), + ) .unwrap(); microblocks.push(mblock); @@ -1328,12 +1353,12 @@ pub fn test_get_blocks_and_microblocks_2_peers_download_multiple_microblock_desc peers[1].process_stacks_epoch(&stacks_block, &consensus_hash, µblocks); - TestPeer::set_ops_burn_header_hash(&mut burn_ops, &burn_header_hash); + TestChainstate::set_ops_burn_header_hash(&mut burn_ops, &burn_header_hash); peers[0].next_burnchain_block_raw(burn_ops); let sn = SortitionDB::get_canonical_burn_chain_tip( - peers[1].sortdb.as_ref().unwrap().conn(), + peers[1].chain.sortdb.as_ref().unwrap().conn(), ) .unwrap(); @@ -1348,12 +1373,12 @@ pub fn test_get_blocks_and_microblocks_2_peers_download_multiple_microblock_desc } else { test_debug!("Build child block {}", i); let tip = SortitionDB::get_canonical_burn_chain_tip( - peers[1].sortdb.as_ref().unwrap().conn(), + peers[1].chain.sortdb.as_ref().unwrap().conn(), ) .unwrap(); - let chainstate_path = peers[1].chainstate_path.clone(); - let burnchain = peers[1].config.burnchain.clone(); + let chainstate_path = peers[1].chain.chainstate_path.clone(); + let burnchain = peers[1].config.chain_config.burnchain.clone(); let (mut burn_ops, stacks_block, _) = peers[1].make_tenure( |ref mut miner, @@ -1418,12 +1443,12 @@ pub fn test_get_blocks_and_microblocks_2_peers_download_multiple_microblock_desc peers[1].process_stacks_epoch(&stacks_block, &consensus_hash, &[]); - TestPeer::set_ops_burn_header_hash(&mut burn_ops, &burn_header_hash); + TestChainstate::set_ops_burn_header_hash(&mut burn_ops, &burn_header_hash); peers[0].next_burnchain_block_raw(burn_ops); let sn = SortitionDB::get_canonical_burn_chain_tip( - peers[1].sortdb.as_ref().unwrap().conn(), + peers[1].chain.sortdb.as_ref().unwrap().conn(), ) .unwrap(); diff --git a/stackslib/src/net/tests/download/nakamoto.rs b/stackslib/src/net/tests/download/nakamoto.rs index c09020d6d86..d9a3274e2bd 100644 --- a/stackslib/src/net/tests/download/nakamoto.rs +++ 
b/stackslib/src/net/tests/download/nakamoto.rs @@ -39,6 +39,7 @@ use crate::chainstate::stacks::{ TenureChangePayload, TokenTransferMemo, TransactionAnchorMode, TransactionAuth, TransactionPayload, TransactionVersion, }; +use crate::chainstate::tests::TestChainstate; use crate::clarity::vm::types::StacksAddressExtensions; use crate::core::test_util::to_addr; use crate::net::api::gettenureinfo::RPCGetTenureInfo; @@ -48,7 +49,7 @@ use crate::net::test::{dns_thread_start, TestEventObserver}; use crate::net::tests::inv::nakamoto::{ make_nakamoto_peer_from_invs, make_nakamoto_peers_from_invs_ext, peer_get_nakamoto_invs, }; -use crate::net::tests::{NakamotoBootPlan, TestPeer}; +use crate::net::tests::NakamotoBootPlan; use crate::net::{Error as NetError, Hash160, NeighborAddress, SortitionDB}; use crate::stacks_common::types::Address; use crate::util_lib::db::Error as DBError; @@ -400,10 +401,11 @@ fn test_nakamoto_unconfirmed_tenure_downloader() { let peer = make_nakamoto_peer_from_invs(function_name!(), &observer, rc_len as u32, 3, bitvecs); let (mut peer, reward_cycle_invs) = peer_get_nakamoto_invs(peer, &[0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12]); - peer.mine_malleablized_blocks = false; + peer.chain.mine_malleablized_blocks = false; - let nakamoto_start = - NakamotoBootPlan::nakamoto_first_tenure_height(&peer.config.burnchain.pox_constants); + let nakamoto_start = NakamotoBootPlan::nakamoto_first_tenure_height( + &peer.config.chain_config.burnchain.pox_constants, + ); let all_sortitions = peer.sortdb().get_all_snapshots().unwrap(); let tip = SortitionDB::get_canonical_burn_chain_tip(peer.sortdb().conn()).unwrap(); @@ -606,7 +608,7 @@ fn test_nakamoto_unconfirmed_tenure_downloader() { reward_cycle: tip_rc, }; - let sortdb = peer.sortdb.take().unwrap(); + let sortdb = peer.chain.sortdb.take().unwrap(); let sort_tip = SortitionDB::get_canonical_burn_chain_tip(sortdb.conn()).unwrap(); utd.try_accept_tenure_info( &sortdb, @@ -617,7 +619,7 @@ fn test_nakamoto_unconfirmed_tenure_downloader() { ) .unwrap(); - peer.sortdb = Some(sortdb); + peer.chain.sortdb = Some(sortdb); assert!(utd.unconfirmed_tenure_start_block.is_some()); @@ -681,7 +683,7 @@ fn test_nakamoto_unconfirmed_tenure_downloader() { reward_cycle: tip_rc, }; - let sortdb = peer.sortdb.take().unwrap(); + let sortdb = peer.chain.sortdb.take().unwrap(); let sort_tip = SortitionDB::get_canonical_burn_chain_tip(sortdb.conn()).unwrap(); utd.try_accept_tenure_info( &sortdb, @@ -692,7 +694,7 @@ fn test_nakamoto_unconfirmed_tenure_downloader() { ) .unwrap(); - peer.sortdb = Some(sortdb); + peer.chain.sortdb = Some(sortdb); assert!(utd.unconfirmed_tenure_start_block.is_some()); @@ -780,7 +782,7 @@ fn test_nakamoto_unconfirmed_tenure_downloader() { reward_cycle: tip_rc, }; - let sortdb = peer.sortdb.take().unwrap(); + let sortdb = peer.chain.sortdb.take().unwrap(); let sort_tip = SortitionDB::get_canonical_burn_chain_tip(sortdb.conn()).unwrap(); utd.try_accept_tenure_info( &sortdb, @@ -791,7 +793,7 @@ fn test_nakamoto_unconfirmed_tenure_downloader() { ) .unwrap(); - peer.sortdb = Some(sortdb); + peer.chain.sortdb = Some(sortdb); assert!(utd.unconfirmed_tenure_start_block.is_some()); @@ -878,7 +880,7 @@ fn test_nakamoto_unconfirmed_tenure_downloader() { reward_cycle: tip_rc, }; - let sortdb = peer.sortdb.take().unwrap(); + let sortdb = peer.chain.sortdb.take().unwrap(); let sort_tip = SortitionDB::get_canonical_burn_chain_tip(sortdb.conn()).unwrap(); utd.try_accept_tenure_info( &sortdb, @@ -889,7 +891,7 @@ fn 
test_nakamoto_unconfirmed_tenure_downloader() { ) .unwrap(); - peer.sortdb = Some(sortdb); + peer.chain.sortdb = Some(sortdb); assert!(utd.unconfirmed_tenure_start_block.is_some()); @@ -955,7 +957,7 @@ fn test_nakamoto_unconfirmed_tenure_downloader() { reward_cycle: tip_rc, }; - let sortdb = peer.sortdb.take().unwrap(); + let sortdb = peer.chain.sortdb.take().unwrap(); let sort_tip = SortitionDB::get_canonical_burn_chain_tip(sortdb.conn()).unwrap(); utd.try_accept_tenure_info( &sortdb, @@ -966,7 +968,7 @@ fn test_nakamoto_unconfirmed_tenure_downloader() { ) .unwrap(); - peer.sortdb = Some(sortdb); + peer.chain.sortdb = Some(sortdb); assert!(utd.unconfirmed_tenure_start_block.is_some()); @@ -1018,7 +1020,7 @@ fn test_nakamoto_unconfirmed_tenure_downloader() { reward_cycle: tip_rc, }; - let sortdb = peer.sortdb.take().unwrap(); + let sortdb = peer.chain.sortdb.take().unwrap(); let sort_tip = SortitionDB::get_canonical_burn_chain_tip(sortdb.conn()).unwrap(); utd.try_accept_tenure_info( &sortdb, @@ -1029,7 +1031,7 @@ fn test_nakamoto_unconfirmed_tenure_downloader() { ) .unwrap(); - peer.sortdb = Some(sortdb); + peer.chain.sortdb = Some(sortdb); assert!(utd.unconfirmed_tenure_start_block.is_some()); @@ -1325,8 +1327,9 @@ fn test_make_tenure_downloaders() { let (mut peer, reward_cycle_invs) = peer_get_nakamoto_invs(peer, &[0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12]); - let nakamoto_start = - NakamotoBootPlan::nakamoto_first_tenure_height(&peer.config.burnchain.pox_constants); + let nakamoto_start = NakamotoBootPlan::nakamoto_first_tenure_height( + &peer.config.chain_config.burnchain.pox_constants, + ); let all_sortitions = peer.sortdb().get_all_snapshots().unwrap(); let tip = SortitionDB::get_canonical_burn_chain_tip(peer.sortdb().conn()).unwrap(); @@ -2107,8 +2110,9 @@ fn test_nakamoto_download_run_2_peers() { let (mut peer, reward_cycle_invs) = peer_get_nakamoto_invs(peer, &[0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12]); - let nakamoto_start = - NakamotoBootPlan::nakamoto_first_tenure_height(&peer.config.burnchain.pox_constants); + let nakamoto_start = NakamotoBootPlan::nakamoto_first_tenure_height( + &peer.config.chain_config.burnchain.pox_constants, + ); let all_sortitions = peer.sortdb().get_all_snapshots().unwrap(); let tip = SortitionDB::get_canonical_burn_chain_tip(peer.sortdb().conn()).unwrap(); @@ -2147,14 +2151,18 @@ fn test_nakamoto_download_run_2_peers() { &sn.burn_header_hash ); test_debug!("ops = {:?}", &ops); - let block_header = TestPeer::make_next_burnchain_block( - &boot_peer.config.burnchain, + let block_header = TestChainstate::make_next_burnchain_block( + &boot_peer.config.chain_config.burnchain, sn.block_height, &sn.burn_header_hash, ops.len() as u64, false, ); - TestPeer::add_burnchain_block(&boot_peer.config.burnchain, &block_header, ops.clone()); + TestChainstate::add_burnchain_block( + &boot_peer.config.chain_config.burnchain, + &block_header, + ops.clone(), + ); } let (mut boot_dns_client, boot_dns_thread_handle) = dns_thread_start(100); @@ -2216,8 +2224,9 @@ fn test_nakamoto_unconfirmed_download_run_2_peers() { let (mut peer, reward_cycle_invs) = peer_get_nakamoto_invs(peer, &[0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12]); - let nakamoto_start = - NakamotoBootPlan::nakamoto_first_tenure_height(&peer.config.burnchain.pox_constants); + let nakamoto_start = NakamotoBootPlan::nakamoto_first_tenure_height( + &peer.config.chain_config.burnchain.pox_constants, + ); let all_sortitions = peer.sortdb().get_all_snapshots().unwrap(); let tip = 
SortitionDB::get_canonical_burn_chain_tip(peer.sortdb().conn()).unwrap(); @@ -2254,14 +2263,18 @@ fn test_nakamoto_unconfirmed_download_run_2_peers() { &sn.burn_header_hash ); test_debug!("ops = {:?}", &ops); - let block_header = TestPeer::make_next_burnchain_block( - &boot_peer.config.burnchain, + let block_header = TestChainstate::make_next_burnchain_block( + &boot_peer.config.chain_config.burnchain, sn.block_height, &sn.burn_header_hash, ops.len() as u64, false, ); - TestPeer::add_burnchain_block(&boot_peer.config.burnchain, &block_header, ops.clone()); + TestChainstate::add_burnchain_block( + &boot_peer.config.chain_config.burnchain, + &block_header, + ops.clone(), + ); } let (mut boot_dns_client, boot_dns_thread_handle) = dns_thread_start(100); @@ -2336,8 +2349,9 @@ fn test_nakamoto_microfork_download_run_2_peers() { }); peer.refresh_burnchain_view(); - let nakamoto_start = - NakamotoBootPlan::nakamoto_first_tenure_height(&peer.config.burnchain.pox_constants); + let nakamoto_start = NakamotoBootPlan::nakamoto_first_tenure_height( + &peer.config.chain_config.burnchain.pox_constants, + ); // create a microfork let naka_tip_ch = peer.network.stacks_tip.consensus_hash.clone(); @@ -2434,14 +2448,18 @@ fn test_nakamoto_microfork_download_run_2_peers() { &sn.burn_header_hash ); test_debug!("ops = {:?}", &ops); - let block_header = TestPeer::make_next_burnchain_block( - &boot_peer.config.burnchain, + let block_header = TestChainstate::make_next_burnchain_block( + &boot_peer.config.chain_config.burnchain, sn.block_height, &sn.burn_header_hash, ops.len() as u64, false, ); - TestPeer::add_burnchain_block(&boot_peer.config.burnchain, &block_header, ops.clone()); + TestChainstate::add_burnchain_block( + &boot_peer.config.chain_config.burnchain, + &block_header, + ops.clone(), + ); } let (mut boot_dns_client, boot_dns_thread_handle) = dns_thread_start(100); @@ -2513,8 +2531,9 @@ fn test_nakamoto_download_run_2_peers_with_one_shadow_block() { let (mut peer, reward_cycle_invs) = peer_get_nakamoto_invs(peer, &[0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12]); - let nakamoto_start = - NakamotoBootPlan::nakamoto_first_tenure_height(&peer.config.burnchain.pox_constants); + let nakamoto_start = NakamotoBootPlan::nakamoto_first_tenure_height( + &peer.config.chain_config.burnchain.pox_constants, + ); // create a shadow block let naka_tip_ch = peer.network.stacks_tip.consensus_hash.clone(); @@ -2609,22 +2628,26 @@ fn test_nakamoto_download_run_2_peers_with_one_shadow_block() { &sn.burn_header_hash ); test_debug!("ops = {:?}", &ops); - let block_header = TestPeer::make_next_burnchain_block( - &boot_peer.config.burnchain, + let block_header = TestChainstate::make_next_burnchain_block( + &boot_peer.config.chain_config.burnchain, sn.block_height, &sn.burn_header_hash, ops.len() as u64, false, ); - TestPeer::add_burnchain_block(&boot_peer.config.burnchain, &block_header, ops.clone()); + TestChainstate::add_burnchain_block( + &boot_peer.config.chain_config.burnchain, + &block_header, + ops.clone(), + ); } { - let mut node = boot_peer.stacks_node.take().unwrap(); + let mut node = boot_peer.chain.stacks_node.take().unwrap(); let tx = node.chainstate.staging_db_tx_begin().unwrap(); tx.add_shadow_block(&shadow_block).unwrap(); tx.commit().unwrap(); - boot_peer.stacks_node = Some(node); + boot_peer.chain.stacks_node = Some(node); } let (mut boot_dns_client, boot_dns_thread_handle) = dns_thread_start(100); @@ -2693,8 +2716,9 @@ fn test_nakamoto_download_run_2_peers_shadow_prepare_phase() { let (mut peer, reward_cycle_invs) = 
peer_get_nakamoto_invs(peer, &[0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12]); - let nakamoto_start = - NakamotoBootPlan::nakamoto_first_tenure_height(&peer.config.burnchain.pox_constants); + let nakamoto_start = NakamotoBootPlan::nakamoto_first_tenure_height( + &peer.config.chain_config.burnchain.pox_constants, + ); // create a shadow block let naka_tip_ch = peer.network.stacks_tip.consensus_hash.clone(); @@ -2811,23 +2835,27 @@ fn test_nakamoto_download_run_2_peers_shadow_prepare_phase() { &sn.burn_header_hash ); test_debug!("ops = {:?}", &ops); - let block_header = TestPeer::make_next_burnchain_block( - &boot_peer.config.burnchain, + let block_header = TestChainstate::make_next_burnchain_block( + &boot_peer.config.chain_config.burnchain, sn.block_height, &sn.burn_header_hash, ops.len() as u64, false, ); - TestPeer::add_burnchain_block(&boot_peer.config.burnchain, &block_header, ops.clone()); + TestChainstate::add_burnchain_block( + &boot_peer.config.chain_config.burnchain, + &block_header, + ops.clone(), + ); } { - let mut node = boot_peer.stacks_node.take().unwrap(); + let mut node = boot_peer.chain.stacks_node.take().unwrap(); let tx = node.chainstate.staging_db_tx_begin().unwrap(); for shadow_block in shadow_blocks.into_iter() { tx.add_shadow_block(&shadow_block).unwrap(); } tx.commit().unwrap(); - boot_peer.stacks_node = Some(node); + boot_peer.chain.stacks_node = Some(node); } let (mut boot_dns_client, boot_dns_thread_handle) = dns_thread_start(100); @@ -2896,8 +2924,9 @@ fn test_nakamoto_download_run_2_peers_shadow_reward_cycles() { let (mut peer, reward_cycle_invs) = peer_get_nakamoto_invs(peer, &[0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12]); - let nakamoto_start = - NakamotoBootPlan::nakamoto_first_tenure_height(&peer.config.burnchain.pox_constants); + let nakamoto_start = NakamotoBootPlan::nakamoto_first_tenure_height( + &peer.config.chain_config.burnchain.pox_constants, + ); // create a shadow block let naka_tip_ch = peer.network.stacks_tip.consensus_hash.clone(); @@ -3015,24 +3044,28 @@ fn test_nakamoto_download_run_2_peers_shadow_reward_cycles() { sn.block_height, &sn.burn_header_hash ); - test_debug!("ops = {:?}", &ops); - let block_header = TestPeer::make_next_burnchain_block( - &boot_peer.config.burnchain, + test_debug!("ops = {ops:?}"); + let block_header = TestChainstate::make_next_burnchain_block( + &boot_peer.config.chain_config.burnchain, sn.block_height, &sn.burn_header_hash, ops.len() as u64, false, ); - TestPeer::add_burnchain_block(&boot_peer.config.burnchain, &block_header, ops.clone()); + TestChainstate::add_burnchain_block( + &boot_peer.config.chain_config.burnchain, + &block_header, + ops.clone(), + ); } { - let mut node = boot_peer.stacks_node.take().unwrap(); + let mut node = boot_peer.chain.stacks_node.take().unwrap(); let tx = node.chainstate.staging_db_tx_begin().unwrap(); for shadow_block in shadow_blocks.into_iter() { tx.add_shadow_block(&shadow_block).unwrap(); } tx.commit().unwrap(); - boot_peer.stacks_node = Some(node); + boot_peer.chain.stacks_node = Some(node); } let (mut boot_dns_client, boot_dns_thread_handle) = dns_thread_start(100); diff --git a/stackslib/src/net/tests/inv/epoch2x.rs b/stackslib/src/net/tests/inv/epoch2x.rs index 1d2ff1a1a6a..8eff556e050 100644 --- a/stackslib/src/net/tests/inv/epoch2x.rs +++ b/stackslib/src/net/tests/inv/epoch2x.rs @@ -23,6 +23,7 @@ use crate::burnchains::db::BurnchainHeaderReader; use crate::burnchains::tests::BURNCHAIN_TEST_BLOCK_TIME; use crate::burnchains::{Burnchain, BurnchainBlockHeader, BurnchainView, 
PoxConstants}; use crate::chainstate::coordinator::tests::get_burnchain; +use crate::chainstate::tests::TestChainstate; use crate::net::chat::ConversationP2P; use crate::net::inv::inv2x::*; use crate::net::test::*; @@ -514,14 +515,14 @@ fn test_sync_inv_set_blocks_microblocks_available() { let mut peer_1 = TestPeer::new(peer_1_config.clone()); let mut peer_2 = TestPeer::new(peer_2_config.clone()); - let peer_1_test_path = TestPeer::make_test_path(&peer_1.config); - let peer_2_test_path = TestPeer::make_test_path(&peer_2.config); + let peer_1_test_path = TestChainstate::make_test_path(&peer_1.config.chain_config); + let peer_2_test_path = TestChainstate::make_test_path(&peer_2.config.chain_config); assert!(peer_1_test_path != peer_2_test_path); for (test_path, burnchain) in [ - (peer_1_test_path, &mut peer_1.config.burnchain), - (peer_2_test_path, &mut peer_2.config.burnchain), + (peer_1_test_path, &mut peer_1.config.chain_config.burnchain), + (peer_2_test_path, &mut peer_2.config.chain_config.burnchain), ] .iter_mut() { @@ -566,22 +567,21 @@ fn test_sync_inv_set_blocks_microblocks_available() { burnchain.first_block_hash = hdr.block_hash; } - peer_1_config.burnchain.first_block_height = 5; - peer_2_config.burnchain.first_block_height = 5; - peer_1.config.burnchain.first_block_height = 5; - peer_2.config.burnchain.first_block_height = 5; + peer_1_config.chain_config.burnchain.first_block_height = 5; + peer_2_config.chain_config.burnchain.first_block_height = 5; + peer_1.config.chain_config.burnchain.first_block_height = 5; + peer_2.config.chain_config.burnchain.first_block_height = 5; assert_eq!( - peer_1_config.burnchain.first_block_hash, - peer_2_config.burnchain.first_block_hash + peer_1_config.chain_config.burnchain.first_block_hash, + peer_2_config.chain_config.burnchain.first_block_hash ); - let burnchain = peer_1_config.burnchain; + let burnchain = peer_1_config.chain_config.burnchain; let num_blocks = 5; let first_stacks_block_height = { - let sn = SortitionDB::get_canonical_burn_chain_tip(peer_1.sortdb.as_ref().unwrap().conn()) - .unwrap(); + let sn = SortitionDB::get_canonical_burn_chain_tip(peer_1.sortdb_ref().conn()).unwrap(); sn.block_height }; @@ -594,15 +594,15 @@ fn test_sync_inv_set_blocks_microblocks_available() { } let (tip, num_burn_blocks) = { - let sn = SortitionDB::get_canonical_burn_chain_tip(peer_1.sortdb.as_ref().unwrap().conn()) - .unwrap(); - let num_burn_blocks = sn.block_height - peer_1.config.burnchain.first_block_height; + let sn = SortitionDB::get_canonical_burn_chain_tip(peer_1.sortdb_ref().conn()).unwrap(); + let num_burn_blocks = + sn.block_height - peer_1.config.chain_config.burnchain.first_block_height; (sn, num_burn_blocks) }; let nk = peer_1.to_neighbor().addr; - let sortdb = peer_1.sortdb.take().unwrap(); + let sortdb = peer_1.chain.sortdb.take().unwrap(); peer_1.network.init_inv_sync_epoch2x(&sortdb); match peer_1.network.inv_state { Some(ref mut inv) => { @@ -612,10 +612,10 @@ fn test_sync_inv_set_blocks_microblocks_available() { panic!("No inv state"); } }; - peer_1.sortdb = Some(sortdb); + peer_1.chain.sortdb = Some(sortdb); for i in 0..num_blocks { - let sortdb = peer_1.sortdb.take().unwrap(); + let sortdb = peer_1.chain.sortdb.take().unwrap(); let sn = { let ic = sortdb.index_conn(); let sn = SortitionDB::get_ancestor_snapshot( @@ -625,14 +625,14 @@ fn test_sync_inv_set_blocks_microblocks_available() { ) .unwrap() .unwrap(); - eprintln!("{:?}", &sn); + eprintln!("{sn:?}"); sn }; - peer_1.sortdb = Some(sortdb); + peer_1.chain.sortdb = 
Some(sortdb); } for i in 0..num_blocks { - let sortdb = peer_1.sortdb.take().unwrap(); + let sortdb = peer_1.chain.sortdb.take().unwrap(); match peer_1.network.inv_state { Some(ref mut inv) => { assert!(!inv @@ -657,7 +657,7 @@ fn test_sync_inv_set_blocks_microblocks_available() { ) .unwrap() .unwrap(); - eprintln!("{:?}", &sn); + eprintln!("{sn:?}"); sn }; @@ -733,7 +733,7 @@ fn test_sync_inv_set_blocks_microblocks_available() { panic!("No inv state"); } } - peer_1.sortdb = Some(sortdb); + peer_1.chain.sortdb = Some(sortdb); } } @@ -741,17 +741,25 @@ fn test_sync_inv_set_blocks_microblocks_available() { fn test_sync_inv_make_inv_messages() { let peer_1_config = TestPeerConfig::new(function_name!(), 0, 0); - let indexer = BitcoinIndexer::new_unit_test(&peer_1_config.burnchain.working_dir); - let reward_cycle_length = peer_1_config.burnchain.pox_constants.reward_cycle_length; - let num_blocks = peer_1_config.burnchain.pox_constants.reward_cycle_length * 2; + let indexer = BitcoinIndexer::new_unit_test(&peer_1_config.chain_config.burnchain.working_dir); + let reward_cycle_length = peer_1_config + .chain_config + .burnchain + .pox_constants + .reward_cycle_length; + let num_blocks = peer_1_config + .chain_config + .burnchain + .pox_constants + .reward_cycle_length + * 2; assert_eq!(reward_cycle_length, 5); let mut peer_1 = TestPeer::new(peer_1_config); let first_stacks_block_height = { - let sn = SortitionDB::get_canonical_burn_chain_tip(peer_1.sortdb.as_ref().unwrap().conn()) - .unwrap(); + let sn = SortitionDB::get_canonical_burn_chain_tip(peer_1.sortdb_ref().conn()).unwrap(); sn.block_height }; @@ -763,9 +771,9 @@ fn test_sync_inv_make_inv_messages() { } let (tip, num_burn_blocks) = { - let sn = SortitionDB::get_canonical_burn_chain_tip(peer_1.sortdb.as_ref().unwrap().conn()) - .unwrap(); - let num_burn_blocks = sn.block_height - peer_1.config.burnchain.first_block_height; + let sn = SortitionDB::get_canonical_burn_chain_tip(peer_1.sortdb_ref().conn()).unwrap(); + let num_burn_blocks = + sn.block_height - peer_1.config.chain_config.burnchain.first_block_height; (sn, num_burn_blocks) }; @@ -1249,42 +1257,42 @@ fn test_inv_sync_start_reward_cycle() { let block_scan_start = peer_1 .network - .get_block_scan_start(peer_1.sortdb.as_ref().unwrap(), 10); + .get_block_scan_start(peer_1.chain.sortdb.as_ref().unwrap(), 10); assert_eq!(block_scan_start, 7); peer_1.network.connection_opts.inv_reward_cycles = 1; let block_scan_start = peer_1 .network - .get_block_scan_start(peer_1.sortdb.as_ref().unwrap(), 10); + .get_block_scan_start(peer_1.chain.sortdb.as_ref().unwrap(), 10); assert_eq!(block_scan_start, 7); peer_1.network.connection_opts.inv_reward_cycles = 2; let block_scan_start = peer_1 .network - .get_block_scan_start(peer_1.sortdb.as_ref().unwrap(), 10); + .get_block_scan_start(peer_1.chain.sortdb.as_ref().unwrap(), 10); assert_eq!(block_scan_start, 6); peer_1.network.connection_opts.inv_reward_cycles = 3; let block_scan_start = peer_1 .network - .get_block_scan_start(peer_1.sortdb.as_ref().unwrap(), 10); + .get_block_scan_start(peer_1.chain.sortdb.as_ref().unwrap(), 10); assert_eq!(block_scan_start, 5); peer_1.network.connection_opts.inv_reward_cycles = 300; let block_scan_start = peer_1 .network - .get_block_scan_start(peer_1.sortdb.as_ref().unwrap(), 10); + .get_block_scan_start(peer_1.chain.sortdb.as_ref().unwrap(), 10); assert_eq!(block_scan_start, 0); peer_1.network.connection_opts.inv_reward_cycles = 0; let block_scan_start = peer_1 .network - 
.get_block_scan_start(peer_1.sortdb.as_ref().unwrap(), 1); + .get_block_scan_start(peer_1.chain.sortdb.as_ref().unwrap(), 1); assert_eq!(block_scan_start, 1); } @@ -1339,9 +1347,7 @@ fn test_sync_inv_2_peers_plain() { let num_blocks = GETPOXINV_MAX_BITLEN * 2; let first_stacks_block_height = { - let sn = - SortitionDB::get_canonical_burn_chain_tip(peer_1.sortdb.as_ref().unwrap().conn()) - .unwrap(); + let sn = SortitionDB::get_canonical_burn_chain_tip(peer_1.sortdb_ref().conn()).unwrap(); sn.block_height + 1 }; @@ -1356,9 +1362,7 @@ fn test_sync_inv_2_peers_plain() { } let num_burn_blocks = { - let sn = - SortitionDB::get_canonical_burn_chain_tip(peer_1.sortdb.as_ref().unwrap().conn()) - .unwrap(); + let sn = SortitionDB::get_canonical_burn_chain_tip(peer_1.sortdb_ref().conn()).unwrap(); sn.block_height + 1 }; @@ -1510,9 +1514,7 @@ fn test_sync_inv_2_peers_stale() { let num_blocks = GETPOXINV_MAX_BITLEN * 2; let first_stacks_block_height = { - let sn = - SortitionDB::get_canonical_burn_chain_tip(peer_1.sortdb.as_ref().unwrap().conn()) - .unwrap(); + let sn = SortitionDB::get_canonical_burn_chain_tip(peer_1.sortdb_ref().conn()).unwrap(); sn.block_height + 1 }; @@ -1552,7 +1554,8 @@ fn test_sync_inv_2_peers_stale() { if let Some(peer_2_inv) = inv.block_stats.get(&peer_2.to_neighbor().addr) { if peer_2_inv.inv.num_sortitions - == first_stacks_block_height - peer_1.config.burnchain.first_block_height + == first_stacks_block_height + - peer_1.config.chain_config.burnchain.first_block_height { for i in 0..first_stacks_block_height { assert!(!peer_2_inv.inv.has_ith_block(i)); @@ -1571,7 +1574,8 @@ fn test_sync_inv_2_peers_stale() { if let Some(peer_1_inv) = inv.block_stats.get(&peer_1.to_neighbor().addr) { if peer_1_inv.inv.num_sortitions - == first_stacks_block_height - peer_1.config.burnchain.first_block_height + == first_stacks_block_height + - peer_1.config.chain_config.burnchain.first_block_height { peer_1_check = true; } @@ -1600,7 +1604,7 @@ fn test_sync_inv_2_peers_unstable() { peer_1_config.connection_opts.inv_reward_cycles = 10; peer_2_config.connection_opts.inv_reward_cycles = 10; - let stable_confs = peer_1_config.burnchain.stable_confirmations as u64; + let stable_confs = peer_1_config.chain_config.burnchain.stable_confirmations as u64; let mut peer_1 = TestPeer::new(peer_1_config); let mut peer_2 = TestPeer::new(peer_2_config); @@ -1611,9 +1615,7 @@ fn test_sync_inv_2_peers_unstable() { let num_blocks = GETPOXINV_MAX_BITLEN * 2; let first_stacks_block_height = { - let sn = - SortitionDB::get_canonical_burn_chain_tip(peer_1.sortdb.as_ref().unwrap().conn()) - .unwrap(); + let sn = SortitionDB::get_canonical_burn_chain_tip(peer_1.sortdb_ref().conn()).unwrap(); sn.block_height + 1 }; @@ -1625,7 +1627,7 @@ fn test_sync_inv_2_peers_unstable() { peer_2.next_burnchain_block(burn_ops.clone()); peer_2.process_stacks_epoch_at_tip(&stacks_block, µblocks); - TestPeer::set_ops_burn_header_hash(&mut burn_ops, &burn_header_hash); + TestChainstate::set_ops_burn_header_hash(&mut burn_ops, &burn_header_hash); // NOTE: the nodes only differ by one block -- they agree on the same PoX vector if i + 1 < num_blocks { @@ -1641,20 +1643,18 @@ fn test_sync_inv_2_peers_unstable() { // tips must differ { let sn1 = - SortitionDB::get_canonical_burn_chain_tip(peer_1.sortdb.as_ref().unwrap().conn()) - .unwrap(); - let sn2 = - SortitionDB::get_canonical_burn_chain_tip(peer_2.sortdb.as_ref().unwrap().conn()) - .unwrap(); + SortitionDB::get_canonical_burn_chain_tip(peer_1.sortdb_ref().conn()).unwrap(); + let sn2 = 
SortitionDB::get_canonical_burn_chain_tip( + peer_2.chain.sortdb.as_ref().unwrap().conn(), + ) + .unwrap(); assert_ne!(sn1.burn_header_hash, sn2.burn_header_hash); } let num_stable_blocks = num_blocks - stable_confs; let num_burn_blocks = { - let sn = - SortitionDB::get_canonical_burn_chain_tip(peer_1.sortdb.as_ref().unwrap().conn()) - .unwrap(); + let sn = SortitionDB::get_canonical_burn_chain_tip(peer_1.sortdb_ref().conn()).unwrap(); sn.block_height + 1 }; diff --git a/stackslib/src/net/tests/inv/nakamoto.rs b/stackslib/src/net/tests/inv/nakamoto.rs index a510c992c82..9618cea64b9 100644 --- a/stackslib/src/net/tests/inv/nakamoto.rs +++ b/stackslib/src/net/tests/inv/nakamoto.rs @@ -153,7 +153,7 @@ fn test_nakamoto_inv_10_tenures_10_sortitions() { // sanity check -- nakamoto begins at height 37 assert_eq!( - peer.config.epochs, + peer.config.chain_config.epochs, Some(StacksEpoch::unit_test_3_0_only(37)) ); @@ -161,8 +161,8 @@ fn test_nakamoto_inv_10_tenures_10_sortitions() { peer_get_nakamoto_invs(peer, &[0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12]); assert_eq!(reward_cycle_invs.len(), 10); - let chainstate = &mut peer.stacks_node.as_mut().unwrap().chainstate; - let sort_db = peer.sortdb.as_mut().unwrap(); + let chainstate = &mut peer.chain.stacks_node.as_mut().unwrap().chainstate; + let sort_db = peer.chain.sortdb.as_mut().unwrap(); let stacks_tip_ch = peer.network.stacks_tip.consensus_hash.clone(); let stacks_tip_bh = peer.network.stacks_tip.block_hash.clone(); @@ -235,7 +235,7 @@ fn test_nakamoto_inv_2_tenures_3_sortitions() { // sanity check -- nakamoto begins at height 37 assert_eq!( - peer.config.epochs, + peer.config.chain_config.epochs, Some(StacksEpoch::unit_test_3_0_only(37)) ); @@ -243,8 +243,8 @@ fn test_nakamoto_inv_2_tenures_3_sortitions() { peer_get_nakamoto_invs(peer, &[0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12]); assert_eq!(reward_cycle_invs.len(), 8); - let chainstate = &mut peer.stacks_node.as_mut().unwrap().chainstate; - let sort_db = peer.sortdb.as_mut().unwrap(); + let chainstate = &mut peer.chain.stacks_node.as_mut().unwrap().chainstate; + let sort_db = peer.chain.sortdb.as_mut().unwrap(); let stacks_tip_ch = peer.network.stacks_tip.consensus_hash.clone(); let stacks_tip_bh = peer.network.stacks_tip.block_hash.clone(); @@ -310,7 +310,7 @@ fn test_nakamoto_inv_10_extended_tenures_10_sortitions() { // sanity check -- nakamoto begins at height 37 assert_eq!( - peer.config.epochs, + peer.config.chain_config.epochs, Some(StacksEpoch::unit_test_3_0_only(37)) ); @@ -318,8 +318,8 @@ fn test_nakamoto_inv_10_extended_tenures_10_sortitions() { peer_get_nakamoto_invs(peer, &[0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12]); assert_eq!(reward_cycle_invs.len(), 10); - let chainstate = &mut peer.stacks_node.as_mut().unwrap().chainstate; - let sort_db = peer.sortdb.as_mut().unwrap(); + let chainstate = &mut peer.chain.stacks_node.as_mut().unwrap().chainstate; + let sort_db = peer.chain.sortdb.as_mut().unwrap(); let stacks_tip_ch = peer.network.stacks_tip.consensus_hash.clone(); let stacks_tip_bh = peer.network.stacks_tip.block_hash.clone(); @@ -624,8 +624,9 @@ fn test_nakamoto_invs_full() { let (peer, reward_cycle_invs) = peer_get_nakamoto_invs(peer, &[0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12]); - let nakamoto_start = - NakamotoBootPlan::nakamoto_first_tenure_height(&peer.config.burnchain.pox_constants); + let nakamoto_start = NakamotoBootPlan::nakamoto_first_tenure_height( + &peer.config.chain_config.burnchain.pox_constants, + ); eprintln!("{:#?}", &reward_cycle_invs); 
assert_eq!(reward_cycle_invs.len(), 10); @@ -657,8 +658,9 @@ fn test_nakamoto_invs_alternating() { let (peer, reward_cycle_invs) = peer_get_nakamoto_invs(peer, &[0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12]); - let nakamoto_start = - NakamotoBootPlan::nakamoto_first_tenure_height(&peer.config.burnchain.pox_constants); + let nakamoto_start = NakamotoBootPlan::nakamoto_first_tenure_height( + &peer.config.chain_config.burnchain.pox_constants, + ); eprintln!("{:#?}", &reward_cycle_invs); assert_eq!(reward_cycle_invs.len(), 10); @@ -696,10 +698,11 @@ fn test_nakamoto_invs_sparse() { let (peer, reward_cycle_invs) = peer_get_nakamoto_invs(peer, &[0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12]); - let nakamoto_start = - NakamotoBootPlan::nakamoto_first_tenure_height(&peer.config.burnchain.pox_constants); + let nakamoto_start = NakamotoBootPlan::nakamoto_first_tenure_height( + &peer.config.chain_config.burnchain.pox_constants, + ); - eprintln!("{:#?}", &reward_cycle_invs); + eprintln!("{reward_cycle_invs:#?}"); assert_eq!(reward_cycle_invs.len(), 12); check_inv_messages(bitvecs, 10, nakamoto_start, reward_cycle_invs); } @@ -731,8 +734,9 @@ fn test_nakamoto_invs_different_anchor_blocks() { let (peer, reward_cycle_invs) = peer_get_nakamoto_invs(peer, &[0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12]); - let nakamoto_start = - NakamotoBootPlan::nakamoto_first_tenure_height(&peer.config.burnchain.pox_constants); + let nakamoto_start = NakamotoBootPlan::nakamoto_first_tenure_height( + &peer.config.chain_config.burnchain.pox_constants, + ); eprintln!("{:#?}", &reward_cycle_invs); assert_eq!(reward_cycle_invs.len(), 12); @@ -871,15 +875,17 @@ fn test_nakamoto_inv_sync_state_machine() { make_nakamoto_peers_from_invs(function_name!(), &observer, 10, 3, bitvecs.clone(), 1); let mut other_peer = other_peers.pop().unwrap(); - let nakamoto_start = - NakamotoBootPlan::nakamoto_first_tenure_height(&peer.config.burnchain.pox_constants); + let nakamoto_start = NakamotoBootPlan::nakamoto_first_tenure_height( + &peer.config.chain_config.burnchain.pox_constants, + ); let tip = { - let sort_db = peer.sortdb.as_mut().unwrap(); + let sort_db = peer.chain.sortdb.as_mut().unwrap(); SortitionDB::get_canonical_burn_chain_tip(sort_db.conn()).unwrap() }; let total_rcs = peer .config + .chain_config .burnchain .block_height_to_reward_cycle(tip.block_height) .unwrap() @@ -909,11 +915,11 @@ fn test_nakamoto_inv_sync_state_machine() { // `observer` std::thread::scope(|s| { s.spawn(|| { - let sortdb = other_peer.sortdb.take().unwrap(); + let sortdb = other_peer.chain.sortdb.take().unwrap(); inv_machine .process_getnakamotoinv_begins(&mut other_peer.network, &sortdb, false) .unwrap(); - other_peer.sortdb = Some(sortdb); + other_peer.chain.sortdb = Some(sortdb); let mut last_learned_rc = 0; loop { @@ -942,11 +948,11 @@ fn test_nakamoto_inv_sync_state_machine() { break; } - let sortdb = other_peer.sortdb.take().unwrap(); + let sortdb = other_peer.chain.sortdb.take().unwrap(); inv_machine .process_getnakamotoinv_begins(&mut other_peer.network, &sortdb, false) .unwrap(); - other_peer.sortdb = Some(sortdb); + other_peer.chain.sortdb = Some(sortdb); } sx.send(true).unwrap(); @@ -995,15 +1001,17 @@ fn test_nakamoto_inv_sync_across_epoch_change() { make_nakamoto_peers_from_invs(function_name!(), &observer, 10, 3, bitvecs, 1); let mut other_peer = other_peers.pop().unwrap(); - let nakamoto_start = - NakamotoBootPlan::nakamoto_first_tenure_height(&peer.config.burnchain.pox_constants); + let nakamoto_start = NakamotoBootPlan::nakamoto_first_tenure_height( + 
&peer.config.chain_config.burnchain.pox_constants, + ); let tip = { - let sort_db = peer.sortdb.as_mut().unwrap(); + let sort_db = peer.chain.sortdb.as_mut().unwrap(); SortitionDB::get_canonical_burn_chain_tip(sort_db.conn()).unwrap() }; let total_rcs = peer .config + .chain_config .burnchain .block_height_to_reward_cycle(tip.block_height) .unwrap(); @@ -1135,7 +1143,7 @@ fn test_nakamoto_make_tenure_inv_in_forks() { initial_balances, ); peer.refresh_burnchain_view(); - peer.mine_malleablized_blocks = false; + peer.chain.mine_malleablized_blocks = false; let mut invgen = InvGenerator::new().with_tip_ancestor_search_depth(5); let mut invgen_no_cache = InvGenerator::new_no_cache().with_tip_ancestor_search_depth(5); @@ -1766,7 +1774,7 @@ fn test_nakamoto_make_tenure_inv_in_many_reward_cycles() { initial_balances, ); peer.refresh_burnchain_view(); - peer.mine_malleablized_blocks = false; + peer.chain.mine_malleablized_blocks = false; let mut invgen = InvGenerator::new().with_tip_ancestor_search_depth(5); let mut invgen_no_cache = InvGenerator::new_no_cache().with_tip_ancestor_search_depth(5); @@ -2274,7 +2282,7 @@ fn test_nakamoto_make_tenure_inv_from_old_tips() { initial_balances, ); peer.refresh_burnchain_view(); - peer.mine_malleablized_blocks = false; + peer.chain.mine_malleablized_blocks = false; let sortdb = peer.sortdb_ref().reopen().unwrap(); let (chainstate, _) = peer.chainstate_ref().reopen().unwrap(); @@ -2371,8 +2379,9 @@ fn test_nakamoto_invs_shadow_blocks() { 0, initial_balances, ); - let nakamoto_start = - NakamotoBootPlan::nakamoto_first_tenure_height(&peer.config.burnchain.pox_constants); + let nakamoto_start = NakamotoBootPlan::nakamoto_first_tenure_height( + &peer.config.chain_config.burnchain.pox_constants, + ); let mut expected_ids = vec![]; diff --git a/stackslib/src/net/tests/mempool/mod.rs b/stackslib/src/net/tests/mempool/mod.rs index de617001b76..e3acb17211e 100644 --- a/stackslib/src/net/tests/mempool/mod.rs +++ b/stackslib/src/net/tests/mempool/mod.rs @@ -48,8 +48,8 @@ fn test_mempool_sync_2_peers() { .map(|a| (a.to_account_principal(), 1000000000)) .collect(); - peer_1_config.initial_balances = initial_balances.clone(); - peer_2_config.initial_balances = initial_balances; + peer_1_config.chain_config.initial_balances = initial_balances.clone(); + peer_2_config.chain_config.initial_balances = initial_balances; let mut peer_1 = TestPeer::new(peer_1_config); let mut peer_2 = TestPeer::new(peer_2_config); @@ -59,8 +59,9 @@ fn test_mempool_sync_2_peers() { let num_blocks = 10; let first_stacks_block_height = { - let sn = SortitionDB::get_canonical_burn_chain_tip(peer_1.sortdb.as_ref().unwrap().conn()) - .unwrap(); + let sn = + SortitionDB::get_canonical_burn_chain_tip(peer_1.chain.sortdb.as_ref().unwrap().conn()) + .unwrap(); sn.block_height + 1 }; @@ -154,8 +155,9 @@ fn test_mempool_sync_2_peers() { } let num_burn_blocks = { - let sn = SortitionDB::get_canonical_burn_chain_tip(peer_1.sortdb.as_ref().unwrap().conn()) - .unwrap(); + let sn = + SortitionDB::get_canonical_burn_chain_tip(peer_1.chain.sortdb.as_ref().unwrap().conn()) + .unwrap(); sn.block_height + 1 }; @@ -314,8 +316,8 @@ fn test_mempool_sync_2_peers_paginated() { .map(|a| (a.to_account_principal(), 1000000000)) .collect(); - peer_1_config.initial_balances = initial_balances.clone(); - peer_2_config.initial_balances = initial_balances; + peer_1_config.chain_config.initial_balances = initial_balances.clone(); + peer_2_config.chain_config.initial_balances = initial_balances; let mut peer_1 = 
TestPeer::new(peer_1_config); let mut peer_2 = TestPeer::new(peer_2_config); @@ -325,8 +327,9 @@ fn test_mempool_sync_2_peers_paginated() { let num_blocks = 10; let first_stacks_block_height = { - let sn = SortitionDB::get_canonical_burn_chain_tip(peer_1.sortdb.as_ref().unwrap().conn()) - .unwrap(); + let sn = + SortitionDB::get_canonical_burn_chain_tip(peer_1.chain.sortdb.as_ref().unwrap().conn()) + .unwrap(); sn.block_height + 1 }; @@ -408,8 +411,9 @@ fn test_mempool_sync_2_peers_paginated() { peer_1.mempool = Some(peer_1_mempool); let num_burn_blocks = { - let sn = SortitionDB::get_canonical_burn_chain_tip(peer_1.sortdb.as_ref().unwrap().conn()) - .unwrap(); + let sn = + SortitionDB::get_canonical_burn_chain_tip(peer_1.chain.sortdb.as_ref().unwrap().conn()) + .unwrap(); sn.block_height + 1 }; @@ -503,8 +507,8 @@ fn test_mempool_sync_2_peers_blacklisted() { .map(|a| (a.to_account_principal(), 1000000000)) .collect(); - peer_1_config.initial_balances = initial_balances.clone(); - peer_2_config.initial_balances = initial_balances; + peer_1_config.chain_config.initial_balances = initial_balances.clone(); + peer_2_config.chain_config.initial_balances = initial_balances; let mut peer_1 = TestPeer::new(peer_1_config); let mut peer_2 = TestPeer::new(peer_2_config); @@ -514,8 +518,9 @@ fn test_mempool_sync_2_peers_blacklisted() { let num_blocks = 10; let first_stacks_block_height = { - let sn = SortitionDB::get_canonical_burn_chain_tip(peer_1.sortdb.as_ref().unwrap().conn()) - .unwrap(); + let sn = + SortitionDB::get_canonical_burn_chain_tip(peer_1.chain.sortdb.as_ref().unwrap().conn()) + .unwrap(); sn.block_height + 1 }; @@ -615,8 +620,9 @@ fn test_mempool_sync_2_peers_blacklisted() { peer_2.mempool = Some(peer_2_mempool); let num_burn_blocks = { - let sn = SortitionDB::get_canonical_burn_chain_tip(peer_1.sortdb.as_ref().unwrap().conn()) - .unwrap(); + let sn = + SortitionDB::get_canonical_burn_chain_tip(peer_1.chain.sortdb.as_ref().unwrap().conn()) + .unwrap(); sn.block_height + 1 }; @@ -712,8 +718,8 @@ fn test_mempool_sync_2_peers_problematic() { .map(|a| (a.to_account_principal(), 1000000000)) .collect(); - peer_1_config.initial_balances = initial_balances.clone(); - peer_2_config.initial_balances = initial_balances; + peer_1_config.chain_config.initial_balances = initial_balances.clone(); + peer_2_config.chain_config.initial_balances = initial_balances; let mut peer_1 = TestPeer::new(peer_1_config); let mut peer_2 = TestPeer::new(peer_2_config); @@ -723,8 +729,9 @@ fn test_mempool_sync_2_peers_problematic() { let num_blocks = 10; let first_stacks_block_height = { - let sn = SortitionDB::get_canonical_burn_chain_tip(peer_1.sortdb.as_ref().unwrap().conn()) - .unwrap(); + let sn = + SortitionDB::get_canonical_burn_chain_tip(peer_1.chain.sortdb.as_ref().unwrap().conn()) + .unwrap(); sn.block_height + 1 }; @@ -753,7 +760,7 @@ fn test_mempool_sync_2_peers_problematic() { let exceeds_repeat_factor = AST_CALL_STACK_DEPTH_BUFFER + (MAX_CALL_STACK_DEPTH as u64); let tx_exceeds_body_start = "{ a : ".repeat(exceeds_repeat_factor as usize); let tx_exceeds_body_end = "} ".repeat(exceeds_repeat_factor as usize); - let tx_exceeds_body = format!("{}u1 {}", tx_exceeds_body_start, tx_exceeds_body_end); + let tx_exceeds_body = format!("{tx_exceeds_body_start}u1 {tx_exceeds_body_end}"); let tx = make_contract_tx( pk, @@ -801,8 +808,9 @@ fn test_mempool_sync_2_peers_problematic() { peer_2.mempool = Some(peer_2_mempool); let num_burn_blocks = { - let sn = 
SortitionDB::get_canonical_burn_chain_tip(peer_1.sortdb.as_ref().unwrap().conn()) - .unwrap(); + let sn = + SortitionDB::get_canonical_burn_chain_tip(peer_1.chain.sortdb.as_ref().unwrap().conn()) + .unwrap(); sn.block_height + 1 }; @@ -909,7 +917,7 @@ pub fn test_mempool_storage_nakamoto() { let mut total_blocks = 0; let mut all_txs = vec![]; - let stx_miner_key = peer.miner.nakamoto_miner_key(); + let stx_miner_key = peer.chain.miner.nakamoto_miner_key(); let stx_miner_addr = StacksAddress::from_public_keys( C32_ADDRESS_VERSION_TESTNET_SINGLESIG, &AddressHashMode::SerializeP2PKH, @@ -919,8 +927,12 @@ pub fn test_mempool_storage_nakamoto() { .unwrap(); // duplicate handles to the chainstates so we can submit txs - let mut mempool = - MemPoolDB::open_test(false, peer.config.network_id, &peer.chainstate_path).unwrap(); + let mut mempool = MemPoolDB::open_test( + false, + peer.config.chain_config.network_id, + &peer.chain.chainstate_path, + ) + .unwrap(); let (mut chainstate, _) = peer.chainstate().reopen().unwrap(); let sortdb = peer.sortdb().reopen().unwrap(); @@ -935,9 +947,10 @@ pub fn test_mempool_storage_nakamoto() { tenure_change.burn_view_consensus_hash = consensus_hash.clone(); let tenure_change_tx = peer + .chain .miner .make_nakamoto_tenure_change(tenure_change.clone()); - let coinbase_tx = peer.miner.make_nakamoto_coinbase(None, vrf_proof); + let coinbase_tx = peer.chain.miner.make_nakamoto_coinbase(None, vrf_proof); debug!("Next burnchain block: {}", &consensus_hash); @@ -1017,8 +1030,8 @@ pub fn test_mempool_storage_nakamoto() { } let tip = { - let chainstate = &mut peer.stacks_node.as_mut().unwrap().chainstate; - let sort_db = peer.sortdb.as_mut().unwrap(); + let chainstate = &mut peer.chain.stacks_node.as_mut().unwrap().chainstate; + let sort_db = peer.chain.sortdb.as_mut().unwrap(); NakamotoChainState::get_canonical_block_header(chainstate.db(), sort_db) .unwrap() .unwrap() @@ -1092,15 +1105,17 @@ fn test_mempool_sync_2_peers_nakamoto_paginated() { ); let mut peer_2 = other_peers.pop().unwrap(); - let nakamoto_start = - NakamotoBootPlan::nakamoto_first_tenure_height(&peer_1.config.burnchain.pox_constants); + let nakamoto_start = NakamotoBootPlan::nakamoto_first_tenure_height( + &peer_1.config.chain_config.burnchain.pox_constants, + ); let tip = { - let sort_db = peer_1.sortdb.as_mut().unwrap(); + let sort_db = peer_1.chain.sortdb.as_mut().unwrap(); SortitionDB::get_canonical_burn_chain_tip(sort_db.conn()).unwrap() }; let total_rcs = peer_1 .config + .chain_config .burnchain .block_height_to_reward_cycle(tip.block_height) .unwrap(); @@ -1196,8 +1211,9 @@ fn test_mempool_sync_2_peers_nakamoto_paginated() { peer_1.mempool = Some(peer_1_mempool); let num_burn_blocks = { - let sn = SortitionDB::get_canonical_burn_chain_tip(peer_1.sortdb.as_ref().unwrap().conn()) - .unwrap(); + let sn = + SortitionDB::get_canonical_burn_chain_tip(peer_1.chain.sortdb.as_ref().unwrap().conn()) + .unwrap(); sn.block_height + 1 }; diff --git a/stackslib/src/net/tests/mod.rs b/stackslib/src/net/tests/mod.rs index 418dd0e6523..f22a0efbf7b 100644 --- a/stackslib/src/net/tests/mod.rs +++ b/stackslib/src/net/tests/mod.rs @@ -25,6 +25,8 @@ pub mod relay; use std::collections::{HashMap, HashSet}; use std::net::{IpAddr, Ipv4Addr, SocketAddr}; +use clarity::types::EpochList; +use clarity::vm::costs::ExecutionCost; use clarity::vm::types::{PrincipalData, QualifiedContractIdentifier}; use libstackerdb::StackerDBChunkData; use rand::Rng; @@ -35,7 +37,7 @@ use stacks_common::types::chainstate::{ StacksPublicKey, 
TrieHash, }; use stacks_common::types::net::PeerAddress; -use stacks_common::types::{Address, StacksEpochId}; +use stacks_common::types::Address; use stacks_common::util::hash::Sha512Trunc256Sum; use stacks_common::util::secp256k1::MessageSignature; @@ -48,8 +50,7 @@ use crate::chainstate::nakamoto::test_signers::TestSigners; use crate::chainstate::nakamoto::tests::get_account; use crate::chainstate::nakamoto::tests::node::TestStacker; use crate::chainstate::nakamoto::{NakamotoBlock, NakamotoBlockHeader, NakamotoChainState}; -use crate::chainstate::stacks::address::PoxAddress; -use crate::chainstate::stacks::boot::test::{key_to_stacks_addr, make_pox_4_lockup_chain_id}; +use crate::chainstate::stacks::boot::test::key_to_stacks_addr; use crate::chainstate::stacks::boot::{ MINERS_NAME, SIGNERS_VOTING_FUNCTION_NAME, SIGNERS_VOTING_NAME, }; @@ -61,6 +62,7 @@ use crate::chainstate::stacks::{ TokenTransferMemo, TransactionAnchorMode, TransactionAuth, TransactionContractCall, TransactionPayload, TransactionVersion, }; +use crate::chainstate::tests::{TestChainstate, TestChainstateConfig}; use crate::clarity::vm::types::StacksAddressExtensions; use crate::core::{StacksEpoch, StacksEpochExtension}; use crate::net::relay::Relayer; @@ -70,7 +72,6 @@ use crate::net::{ PingData, StackerDBPushChunkData, StacksMessage, StacksMessageType, StacksNodeState, }; use crate::util_lib::boot::boot_code_id; -use crate::util_lib::signed_structured_data::pox4::make_pox_4_signer_key_signature; /// One step of a simulated Nakamoto node's bootup procedure. #[derive(Debug, PartialEq, Clone)] @@ -100,14 +101,16 @@ pub struct NakamotoBootPlan { pub malleablized_blocks: bool, pub network_id: u32, pub txindex: bool, + pub epochs: Option<EpochList<ExecutionCost>>, } impl NakamotoBootPlan { pub fn new(test_name: &str) -> Self { let (test_signers, test_stackers) = TestStacker::common_signing_set(); + let default_config = TestChainstateConfig::default(); Self { test_name: test_name.to_string(), - pox_constants: TestPeerConfig::default().burnchain.pox_constants, + pox_constants: default_config.burnchain.pox_constants, private_key: StacksPrivateKey::from_seed(&[2]), initial_balances: vec![], test_stackers, @@ -116,11 +119,66 @@ impl NakamotoBootPlan { num_peers: 0, add_default_balance: true, malleablized_blocks: true, - network_id: TestPeerConfig::default().network_id, + network_id: default_config.network_id, txindex: false, + epochs: None, } } + // Builds a TestChainstateConfig with shared parameters + fn build_nakamoto_chainstate_config(&self) -> TestChainstateConfig { + let mut chainstate_config = TestChainstateConfig::new(&self.test_name); + chainstate_config.network_id = self.network_id; + chainstate_config.txindex = self.txindex; + + let addr = StacksAddress::from_public_keys( + C32_ADDRESS_VERSION_TESTNET_SINGLESIG, + &AddressHashMode::SerializeP2PKH, + 1, + &vec![StacksPublicKey::from_private(&self.private_key)], + ) + .unwrap(); + + let default_epoch = StacksEpoch::unit_test_3_0_only( + (self.pox_constants.pox_4_activation_height + + self.pox_constants.reward_cycle_length + + 1) + .into(), + ); + chainstate_config.epochs = Some(self.epochs.clone().unwrap_or(default_epoch)); + chainstate_config.initial_balances = vec![]; + if self.add_default_balance { + chainstate_config + .initial_balances + .push((addr.to_account_principal(), 1_000_000_000_000_000_000)); + } + chainstate_config + .initial_balances + .extend(self.initial_balances.clone()); + + let fee_payment_balance = 10_000; + let stacker_balances = self.test_stackers.iter().map(|test_stacker|
{ + ( + PrincipalData::from(key_to_stacks_addr(&test_stacker.stacker_private_key)), + u64::try_from(test_stacker.amount).expect("Stacking amount too large"), + ) + }); + let signer_balances = self.test_stackers.iter().map(|test_stacker| { + ( + PrincipalData::from(key_to_stacks_addr(&test_stacker.signer_private_key)), + fee_payment_balance, + ) + }); + + chainstate_config.initial_balances.extend(stacker_balances); + chainstate_config.initial_balances.extend(signer_balances); + chainstate_config.test_signers = Some(self.test_signers.clone()); + chainstate_config.test_stackers = Some(self.test_stackers.clone()); + chainstate_config.burnchain.pox_constants = self.pox_constants.clone(); + + chainstate_config + } + pub fn with_private_key(mut self, privk: StacksPrivateKey) -> Self { self.private_key = privk; self @@ -153,6 +211,11 @@ impl NakamotoBootPlan { self } + pub fn with_epochs(mut self, epochs: EpochList) -> Self { + self.epochs = Some(epochs); + self + } + pub fn with_initial_balances(mut self, initial_balances: Vec<(PrincipalData, u64)>) -> Self { self.initial_balances = initial_balances; self @@ -255,8 +318,8 @@ impl NakamotoBootPlan { for (i, peer) in other_peers.iter_mut().enumerate() { peer.next_burnchain_block(burn_ops.to_vec()); - let sortdb = peer.sortdb.take().unwrap(); - let mut node = peer.stacks_node.take().unwrap(); + let sortdb = peer.chain.sortdb.take().unwrap(); + let mut node = peer.chain.stacks_node.take().unwrap(); let sort_tip = SortitionDB::get_canonical_sortition_tip(sortdb.conn()).unwrap(); let mut sort_handle = sortdb.index_handle(&sort_tip); @@ -265,10 +328,9 @@ impl NakamotoBootPlan { for block in blocks { debug!( - "Apply block {} (sighash {}) to peer {} ({})", + "Apply block {} (sighash {}) to peer {i} ({})", &block.block_id(), &block.header.signer_signature_hash(), - i, &peer.to_neighbor().addr ); let block_id = block.block_id(); @@ -283,29 +345,25 @@ impl NakamotoBootPlan { NakamotoBlockObtainMethod::Pushed, ) .unwrap(); - if accepted.is_accepted() { - test_debug!("Accepted Nakamoto block {block_id} to other peer {}", i); - peer.coord.handle_new_nakamoto_stacks_block().unwrap(); - } else { - panic!( - "Did NOT accept Nakamoto block {block_id} to other peer {}", - i - ); - } + assert!( + accepted.is_accepted(), + "Did NOT accept Nakamoto block {block_id} to other peer {i}" + ); + test_debug!("Accepted Nakamoto block {block_id} to other peer {i}"); + peer.chain.coord.handle_new_nakamoto_stacks_block().unwrap(); possible_chain_tips.insert(block.block_id()); // process it - peer.coord.handle_new_stacks_block().unwrap(); - peer.coord.handle_new_nakamoto_stacks_block().unwrap(); + peer.chain.coord.handle_new_stacks_block().unwrap(); + peer.chain.coord.handle_new_nakamoto_stacks_block().unwrap(); } for block in malleablized_blocks { debug!( - "Apply malleablized block {} (sighash {}) to peer {} ({})", + "Apply malleablized block {} (sighash {}) to peer {i} ({})", &block.block_id(), &block.header.signer_signature_hash(), - i, &peer.to_neighbor().addr ); let block_id = block.block_id(); @@ -320,342 +378,92 @@ impl NakamotoBootPlan { NakamotoBlockObtainMethod::Pushed, ) .unwrap(); - if accepted.is_accepted() { - test_debug!( - "Accepted malleablized Nakamoto block {block_id} to other peer {}", - i - ); - peer.coord.handle_new_nakamoto_stacks_block().unwrap(); - } else { - panic!( - "Did NOT accept malleablized Nakamoto block {block_id} to other peer {}", - i - ); - } + assert!( + accepted.is_accepted(), + "Did NOT accept malleablized Nakamoto block {block_id} to 
other peer {i}" + ); + test_debug!("Accepted malleablized Nakamoto block {block_id} to other peer {i}"); + peer.chain.coord.handle_new_nakamoto_stacks_block().unwrap(); possible_chain_tips.insert(block.block_id()); // process it - peer.coord.handle_new_stacks_block().unwrap(); - peer.coord.handle_new_nakamoto_stacks_block().unwrap(); + peer.chain.coord.handle_new_stacks_block().unwrap(); + peer.chain.coord.handle_new_nakamoto_stacks_block().unwrap(); } - peer.sortdb = Some(sortdb); - peer.stacks_node = Some(node); + peer.chain.sortdb = Some(sortdb); + peer.chain.stacks_node = Some(node); peer.refresh_burnchain_view(); assert!(possible_chain_tips.contains(&peer.network.stacks_tip.block_id())); } } + /// Make a chainstate and transition it into the Nakamoto epoch. + /// The node needs to be stacking; otherwise, Nakamoto won't activate. + pub fn boot_nakamoto_chainstate( + self, + observer: Option<&TestEventObserver>, + ) -> TestChainstate<'_> { + let chainstate_config = self.build_nakamoto_chainstate_config(); + let mut chain = TestChainstate::new_with_observer(chainstate_config, observer); + chain.mine_malleablized_blocks = self.malleablized_blocks; + let mut chain_nonce = 0; + chain.advance_to_nakamoto_epoch(&self.private_key, &mut chain_nonce); + chain + } + /// Make a peer and transition it into the Nakamoto epoch. /// The node needs to be stacking; otherwise, Nakamoto won't activate. - fn boot_nakamoto_peers( - mut self, + /// Boot a TestPeer and followers into the Nakamoto epoch + pub fn boot_nakamoto_peers( + self, observer: Option<&TestEventObserver>, ) -> (TestPeer<'_>, Vec<TestPeer<'_>>) { let mut peer_config = TestPeerConfig::new(&self.test_name, 0, 0); - peer_config.network_id = self.network_id; + peer_config.chain_config = self.build_nakamoto_chainstate_config(); peer_config.private_key = self.private_key.clone(); - peer_config.txindex = self.txindex; - - let addr = StacksAddress::from_public_keys( - C32_ADDRESS_VERSION_TESTNET_SINGLESIG, - &AddressHashMode::SerializeP2PKH, - 1, - &vec![StacksPublicKey::from_private(&self.private_key)], - ) - .unwrap(); - - // reward cycles are 5 blocks long - // first 25 blocks are boot-up - // reward cycle 6 instantiates pox-3 - // we stack in reward cycle 7 so pox-3 is evaluated to find reward set participation + peer_config.connection_opts.auth_token = Some("password".to_string()); peer_config .stacker_dbs .push(boot_code_id(MINERS_NAME, false)); - peer_config.epochs = Some(StacksEpoch::unit_test_3_0_only( - (self.pox_constants.pox_4_activation_height - + self.pox_constants.reward_cycle_length - + 1) - .into(), - )); - peer_config.initial_balances = vec![]; - if self.add_default_balance { - peer_config - .initial_balances - .push((addr.to_account_principal(), 1_000_000_000_000_000_000)); - } - peer_config - .initial_balances - .append(&mut self.initial_balances.clone()); - peer_config.connection_opts.auth_token = Some("password".to_string()); - // Create some balances for test Stackers - // They need their stacking amount + enough to pay fees - let fee_payment_balance = 10_000; - let stacker_balances = self.test_stackers.iter().map(|test_stacker| { - ( - PrincipalData::from(key_to_stacks_addr(&test_stacker.stacker_private_key)), - u64::try_from(test_stacker.amount).expect("Stacking amount too large"), - ) - }); - let signer_balances = self.test_stackers.iter().map(|test_stacker| { - ( - PrincipalData::from(key_to_stacks_addr(&test_stacker.signer_private_key)), - fee_payment_balance, - ) - }); - - peer_config.initial_balances.extend(stacker_balances); -
peer_config.initial_balances.extend(signer_balances); - peer_config.test_signers = Some(self.test_signers.clone()); - peer_config.test_stackers = Some(self.test_stackers.clone()); - peer_config.burnchain.pox_constants = self.pox_constants.clone(); let mut peer = TestPeer::new_with_observer(peer_config.clone(), observer); - - peer.mine_malleablized_blocks = self.malleablized_blocks; + peer.chain.mine_malleablized_blocks = self.malleablized_blocks; let mut other_peers = vec![]; for i in 0..self.num_peers { let mut other_config = peer_config.clone(); - other_config.test_name = format!("{}.follower", &peer.config.test_name); + other_config.chain_config.test_name = + format!("{}.follower", &peer_config.chain_config.test_name); other_config.server_port = 0; other_config.http_port = 0; - other_config.test_stackers = peer.config.test_stackers.clone(); + other_config.chain_config.test_stackers = + peer_config.chain_config.test_stackers.clone(); other_config.private_key = StacksPrivateKey::from_seed(&(i as u128).to_be_bytes()); - other_config.add_neighbor(&peer.to_neighbor()); let mut other_peer = TestPeer::new_with_observer(other_config, None); - other_peer.mine_malleablized_blocks = self.malleablized_blocks; - + other_peer.chain.mine_malleablized_blocks = self.malleablized_blocks; other_peers.push(other_peer); } - self.advance_to_nakamoto(&mut peer, &mut other_peers); - (peer, other_peers) - } - - /// Bring a TestPeer into the Nakamoto Epoch - fn advance_to_nakamoto(&mut self, peer: &mut TestPeer, other_peers: &mut [TestPeer]) { let mut peer_nonce = 0; let mut other_peer_nonces = vec![0; other_peers.len()]; - let addr = StacksAddress::p2pkh(false, &StacksPublicKey::from_private(&self.private_key)); - let default_pox_addr = - PoxAddress::from_legacy(AddressHashMode::SerializeP2PKH, addr.bytes().clone()); - - let mut sortition_height = peer.get_burn_block_height(); - debug!("\n\n======================"); - debug!("PoxConstants = {:#?}", &peer.config.burnchain.pox_constants); - debug!("tip = {}", sortition_height); - debug!("========================\n\n"); - - let epoch_25_height = peer - .config - .epochs - .as_ref() - .unwrap() - .iter() - .find(|e| e.epoch_id == StacksEpochId::Epoch25) - .unwrap() - .start_height; - - let epoch_30_height = peer - .config - .epochs - .as_ref() - .unwrap() - .iter() - .find(|e| e.epoch_id == StacksEpochId::Epoch30) - .unwrap() - .start_height; - - // advance to just past pox-4 instantiation - let mut blocks_produced = false; - while sortition_height <= epoch_25_height { - peer.tenure_with_txs(&[], &mut peer_nonce); - for (other_peer, other_peer_nonce) in - other_peers.iter_mut().zip(other_peer_nonces.iter_mut()) - { - other_peer.tenure_with_txs(&[], other_peer_nonce); - } - - sortition_height = peer.get_burn_block_height(); - blocks_produced = true; - } - - // need to produce at least 1 block before making pox-4 lockups: - // the way `burn-block-height` constant works in Epoch 2.5 is such - // that if its the first block produced, this will be 0 which will - // prevent the lockups from being valid. 
- if !blocks_produced { - peer.tenure_with_txs(&[], &mut peer_nonce); - for (other_peer, other_peer_nonce) in - other_peers.iter_mut().zip(other_peer_nonces.iter_mut()) - { - other_peer.tenure_with_txs(&[], other_peer_nonce); - } - - sortition_height = peer.get_burn_block_height(); - } - - debug!("\n\n======================"); - debug!("Make PoX-4 lockups"); - debug!("========================\n\n"); - - let reward_cycle = peer - .config - .burnchain - .block_height_to_reward_cycle(sortition_height) - .unwrap(); - - // Make all the test Stackers stack - let stack_txs: Vec<_> = peer - .config - .test_stackers - .clone() - .unwrap_or_default() - .iter() - .map(|test_stacker| { - let pox_addr = test_stacker - .pox_addr - .clone() - .unwrap_or(default_pox_addr.clone()); - let max_amount = test_stacker.max_amount.unwrap_or(u128::MAX); - let signature = make_pox_4_signer_key_signature( - &pox_addr, - &test_stacker.signer_private_key, - reward_cycle.into(), - &crate::util_lib::signed_structured_data::pox4::Pox4SignatureTopic::StackStx, - peer.config.network_id, - 12, - max_amount, - 1, - ) - .unwrap() - .to_rsv(); - make_pox_4_lockup_chain_id( - &test_stacker.stacker_private_key, - 0, - test_stacker.amount, - &pox_addr, - 12, - &StacksPublicKey::from_private(&test_stacker.signer_private_key), - sortition_height + 1, - Some(signature), - max_amount, - 1, - peer.config.network_id, - ) - }) - .collect(); - - let mut old_tip = peer.network.stacks_tip.clone(); - let mut stacks_block = peer.tenure_with_txs(&stack_txs, &mut peer_nonce); - - let (stacks_tip_ch, stacks_tip_bh) = - SortitionDB::get_canonical_stacks_chain_tip_hash(peer.sortdb().conn()).unwrap(); - let stacks_tip = StacksBlockId::new(&stacks_tip_ch, &stacks_tip_bh); - assert_eq!(peer.network.stacks_tip.block_id(), stacks_tip); - if old_tip.block_id() != stacks_tip { - old_tip.burnchain_height = peer.network.parent_stacks_tip.burnchain_height; - assert_eq!(old_tip, peer.network.parent_stacks_tip); - } + // Advance primary peer and other peers to Nakamoto epoch + peer.chain + .advance_to_nakamoto_epoch(&self.private_key, &mut peer_nonce); for (other_peer, other_peer_nonce) in other_peers.iter_mut().zip(other_peer_nonces.iter_mut()) { - let mut old_tip = other_peer.network.stacks_tip.clone(); - other_peer.tenure_with_txs(&stack_txs, other_peer_nonce); - - let (stacks_tip_ch, stacks_tip_bh) = - SortitionDB::get_canonical_stacks_chain_tip_hash(other_peer.sortdb().conn()) - .unwrap(); - let stacks_tip = StacksBlockId::new(&stacks_tip_ch, &stacks_tip_bh); - assert_eq!(other_peer.network.stacks_tip.block_id(), stacks_tip); - if old_tip.block_id() != stacks_tip { - old_tip.burnchain_height = other_peer.network.parent_stacks_tip.burnchain_height; - assert_eq!(old_tip, other_peer.network.parent_stacks_tip); - } - } - - debug!("\n\n======================"); - debug!("Advance to the Prepare Phase"); - debug!("========================\n\n"); - while !peer.config.burnchain.is_in_prepare_phase(sortition_height) { - let mut old_tip = peer.network.stacks_tip.clone(); - stacks_block = peer.tenure_with_txs(&[], &mut peer_nonce); - - let (stacks_tip_ch, stacks_tip_bh) = - SortitionDB::get_canonical_stacks_chain_tip_hash(peer.sortdb().conn()).unwrap(); - let stacks_tip = StacksBlockId::new(&stacks_tip_ch, &stacks_tip_bh); - assert_eq!(peer.network.stacks_tip.block_id(), stacks_tip); - if old_tip.block_id() != stacks_tip { - old_tip.burnchain_height = peer.network.parent_stacks_tip.burnchain_height; - assert_eq!(old_tip, peer.network.parent_stacks_tip); - } - 
other_peers - .iter_mut() - .zip(other_peer_nonces.iter_mut()) - .for_each(|(peer, nonce)| { - let mut old_tip = peer.network.stacks_tip.clone(); - peer.tenure_with_txs(&[], nonce); - - let (stacks_tip_ch, stacks_tip_bh) = - SortitionDB::get_canonical_stacks_chain_tip_hash(peer.sortdb().conn()) - .unwrap(); - let stacks_tip = StacksBlockId::new(&stacks_tip_ch, &stacks_tip_bh); - assert_eq!(peer.network.stacks_tip.block_id(), stacks_tip); - if old_tip.block_id() != stacks_tip { - old_tip.burnchain_height = peer.network.parent_stacks_tip.burnchain_height; - assert_eq!(old_tip, peer.network.parent_stacks_tip); - } - }); - sortition_height = peer.get_burn_block_height(); - } - - debug!("\n\n======================"); - debug!("Advance to Epoch 3.0"); - debug!("========================\n\n"); - - // advance to the start of epoch 3.0 - while sortition_height < epoch_30_height - 1 { - let mut old_tip = peer.network.stacks_tip.clone(); - peer.tenure_with_txs(&[], &mut peer_nonce); - - let (stacks_tip_ch, stacks_tip_bh) = - SortitionDB::get_canonical_stacks_chain_tip_hash(peer.sortdb().conn()).unwrap(); - let stacks_tip = StacksBlockId::new(&stacks_tip_ch, &stacks_tip_bh); - assert_eq!(peer.network.stacks_tip.block_id(), stacks_tip); - if old_tip.block_id() != stacks_tip { - old_tip.burnchain_height = peer.network.parent_stacks_tip.burnchain_height; - assert_eq!(old_tip, peer.network.parent_stacks_tip); - } - - for (other_peer, other_peer_nonce) in - other_peers.iter_mut().zip(other_peer_nonces.iter_mut()) - { - let mut old_tip = peer.network.stacks_tip.clone(); - other_peer.tenure_with_txs(&[], other_peer_nonce); - - let (stacks_tip_ch, stacks_tip_bh) = - SortitionDB::get_canonical_stacks_chain_tip_hash(other_peer.sortdb().conn()) - .unwrap(); - let stacks_tip = StacksBlockId::new(&stacks_tip_ch, &stacks_tip_bh); - assert_eq!(other_peer.network.stacks_tip.block_id(), stacks_tip); - if old_tip.block_id() != stacks_tip { - old_tip.burnchain_height = - other_peer.network.parent_stacks_tip.burnchain_height; - assert_eq!(old_tip, other_peer.network.parent_stacks_tip); - } - } - sortition_height = peer.get_burn_block_height(); + other_peer + .chain + .advance_to_nakamoto_epoch(&self.private_key, other_peer_nonce); } - debug!("\n\n======================"); - debug!("Welcome to Nakamoto!"); - debug!("========================\n\n"); + (peer, other_peers) } pub fn boot_into_nakamoto_peers( @@ -701,6 +509,7 @@ impl NakamotoBootPlan { blocks_since_last_tenure, ); let tenure_change_tx = peer + .chain .miner .make_nakamoto_tenure_change(tenure_change_extend.clone()); @@ -763,7 +572,7 @@ impl NakamotoBootPlan { .collect(); let malleablized_blocks = - std::mem::replace(&mut peer.malleablized_blocks, vec![]); + std::mem::replace(&mut peer.chain.malleablized_blocks, vec![]); for mblk in malleablized_blocks.iter() { malleablized_block_ids.insert(mblk.block_id()); } @@ -795,10 +604,11 @@ impl NakamotoBootPlan { last_tenure_change = Some(tenure_change.clone()); let tenure_change_tx = peer + .chain .miner .make_nakamoto_tenure_change(tenure_change.clone()); - let coinbase_tx = peer.miner.make_nakamoto_coinbase(None, vrf_proof); + let coinbase_tx = peer.chain.miner.make_nakamoto_coinbase(None, vrf_proof); debug!("\n\nNew tenure: {}\n\n", &consensus_hash); @@ -862,7 +672,7 @@ impl NakamotoBootPlan { .collect(); let malleablized_blocks = - std::mem::replace(&mut peer.malleablized_blocks, vec![]); + std::mem::replace(&mut peer.chain.malleablized_blocks, vec![]); for mblk in malleablized_blocks.iter() { 
malleablized_block_ids.insert(mblk.block_id()); } @@ -886,8 +696,8 @@ impl NakamotoBootPlan { // check that our tenure-extends have been getting applied let (highest_tenure, sort_tip) = { - let chainstate = &mut peer.stacks_node.as_mut().unwrap().chainstate; - let sort_db = peer.sortdb.as_mut().unwrap(); + let chainstate = &mut peer.chain.stacks_node.as_mut().unwrap().chainstate; + let sort_db = peer.chain.sortdb.as_mut().unwrap(); let tip = SortitionDB::get_canonical_burn_chain_tip(sort_db.conn()).unwrap(); let tenure = NakamotoChainState::get_ongoing_tenure( &mut chainstate.index_conn(), @@ -924,8 +734,18 @@ impl NakamotoBootPlan { // transaction in `all_blocks` ran to completion if let Some(observer) = observer { let mut observed_blocks = observer.get_blocks(); - let mut block_idx = (peer.config.burnchain.pox_constants.pox_4_activation_height - + peer.config.burnchain.pox_constants.reward_cycle_length + let mut block_idx = (peer + .config + .chain_config + .burnchain + .pox_constants + .pox_4_activation_height + + peer + .config + .chain_config + .burnchain + .pox_constants + .reward_cycle_length - 25) as usize; // filter out observed blocks that are malleablized @@ -981,8 +801,8 @@ impl NakamotoBootPlan { // verify that all other peers kept pace with this peer for other_peer in other_peers.iter_mut() { let (other_highest_tenure, other_sort_tip) = { - let chainstate = &mut other_peer.stacks_node.as_mut().unwrap().chainstate; - let sort_db = other_peer.sortdb.as_mut().unwrap(); + let chainstate = &mut other_peer.chain.stacks_node.as_mut().unwrap().chainstate; + let sort_db = other_peer.chain.sortdb.as_mut().unwrap(); let tip = SortitionDB::get_canonical_burn_chain_tip(sort_db.conn()).unwrap(); let tenure = NakamotoChainState::get_ongoing_tenure( &mut chainstate.index_conn(), @@ -1830,8 +1650,8 @@ fn test_update_highest_stacks_height_of_neighbors( old_height.map(|h| (SocketAddr::new(IpAddr::V4(Ipv4Addr::LOCALHOST), 8080), h)); peer.network.highest_stacks_neighbor = prev_highest_neighbor; - let peer_sortdb = peer.sortdb.take().unwrap(); - let mut peer_stacks_node = peer.stacks_node.take().unwrap(); + let peer_sortdb = peer.chain.sortdb.take().unwrap(); + let mut peer_stacks_node = peer.chain.stacks_node.take().unwrap(); let mut peer_mempool = peer.mempool.take().unwrap(); let rpc_args = RPCHandlerArgsType::make_default(); let mut node_state = StacksNodeState::new( diff --git a/stackslib/src/net/tests/neighbors.rs b/stackslib/src/net/tests/neighbors.rs index 496567b31e7..874d385c5e2 100644 --- a/stackslib/src/net/tests/neighbors.rs +++ b/stackslib/src/net/tests/neighbors.rs @@ -322,7 +322,7 @@ fn test_step_walk_1_neighbor_bad_epoch() { // peer 1 thinks its always epoch 2.0 peer_1_config.peer_version = 0x18000000; - peer_1_config.epochs = Some(EpochList::new(&[StacksEpoch { + peer_1_config.chain_config.epochs = Some(EpochList::new(&[StacksEpoch { epoch_id: StacksEpochId::Epoch20, start_height: 0, end_height: STACKS_EPOCH_MAX, @@ -332,7 +332,7 @@ fn test_step_walk_1_neighbor_bad_epoch() { // peer 2 thinks its always epoch 2.05 peer_2_config.peer_version = 0x18000005; - peer_2_config.epochs = Some(EpochList::new(&[StacksEpoch { + peer_2_config.chain_config.epochs = Some(EpochList::new(&[StacksEpoch { epoch_id: StacksEpochId::Epoch2_05, start_height: 0, end_height: STACKS_EPOCH_MAX, @@ -1401,7 +1401,7 @@ fn test_step_walk_2_neighbors_rekey() { peer_2_config.connection_opts.disable_inv_sync = true; peer_2_config.connection_opts.disable_block_download = true; - let first_block_height = 
peer_1_config.current_block + 1; + let first_block_height = peer_1_config.chain_config.current_block + 1; // make keys expire soon peer_1_config.private_key_expire = first_block_height + 3; @@ -1500,13 +1500,13 @@ fn test_step_walk_2_neighbors_different_networks() { let mut peer_1_config = TestPeerConfig::new(function_name!(), 0, 0); let peer_2_config = TestPeerConfig::new(function_name!(), 0, 0); - peer_1_config.network_id = peer_2_config.network_id + 1; + peer_1_config.chain_config.network_id = peer_2_config.chain_config.network_id + 1; let mut peer_1 = TestPeer::new(peer_1_config); let mut peer_2 = TestPeer::new(peer_2_config); let mut peer_1_neighbor = peer_1.to_neighbor(); - peer_1_neighbor.addr.network_id = peer_2.config.network_id; + peer_1_neighbor.addr.network_id = peer_2.config.chain_config.network_id; peer_1.add_neighbor(&mut peer_2.to_neighbor(), None, true); peer_2.add_neighbor(&mut peer_1_neighbor, None, true); diff --git a/stackslib/src/net/tests/relay/epoch2x.rs b/stackslib/src/net/tests/relay/epoch2x.rs index c293346e42f..b0c834e3113 100644 --- a/stackslib/src/net/tests/relay/epoch2x.rs +++ b/stackslib/src/net/tests/relay/epoch2x.rs @@ -36,6 +36,7 @@ use crate::chainstate::stacks::tests::{ make_user_stacks_transfer, }; use crate::chainstate::stacks::{Error as ChainstateError, *}; +use crate::chainstate::tests::TestChainstate; use crate::clarity_vm::clarity::ClarityConnection; use crate::core::*; use crate::net::api::getinfo::RPCPeerInfoData; @@ -564,12 +565,11 @@ fn test_get_blocks_and_microblocks_3_peers_push_available() { peer_configs[2].add_neighbor(&peer_1); }, |num_blocks, ref mut peers| { - let tip = SortitionDB::get_canonical_burn_chain_tip( - peers[0].sortdb.as_ref().unwrap().conn(), - ) - .unwrap(); + let tip = SortitionDB::get_canonical_burn_chain_tip(peers[0].sortdb_ref().conn()) + .unwrap(); let this_reward_cycle = peers[0] .config + .chain_config .burnchain .block_height_to_reward_cycle(tip.block_height) .unwrap(); @@ -581,12 +581,12 @@ fn test_get_blocks_and_microblocks_3_peers_push_available() { // cycle, since pushing block/microblock // announcements in reward cycles the remote // peer doesn't know about won't work. 
- let tip = SortitionDB::get_canonical_burn_chain_tip( - peers[0].sortdb.as_ref().unwrap().conn(), - ) - .unwrap(); + let tip = + SortitionDB::get_canonical_burn_chain_tip(peers[0].sortdb_ref().conn()) + .unwrap(); if peers[0] .config + .chain_config .burnchain .block_height_to_reward_cycle(tip.block_height) .unwrap() @@ -601,16 +601,15 @@ fn test_get_blocks_and_microblocks_3_peers_push_available() { peers[0].next_burnchain_block(burn_ops.clone()); peers[0].process_stacks_epoch_at_tip(&stacks_block, µblocks); - TestPeer::set_ops_burn_header_hash(&mut burn_ops, &burn_header_hash); + TestChainstate::set_ops_burn_header_hash(&mut burn_ops, &burn_header_hash); for i in 1..peers.len() { peers[i].next_burnchain_block_raw(burn_ops.clone()); } - let sn = SortitionDB::get_canonical_burn_chain_tip( - peers[0].sortdb.as_ref().unwrap().conn(), - ) - .unwrap(); + let sn = + SortitionDB::get_canonical_burn_chain_tip(peers[0].sortdb_ref().conn()) + .unwrap(); block_data.push(( sn.consensus_hash.clone(), Some(stacks_block), @@ -628,7 +627,7 @@ fn test_get_blocks_and_microblocks_3_peers_push_available() { // work, and for (Micro)BlocksAvailable messages to be accepted let peer_1_nk = peers[1].to_neighbor().addr; let peer_2_nk = peers[2].to_neighbor().addr; - let bc = peers[1].config.burnchain.clone(); + let bc = peers[1].config.chain_config.burnchain.clone(); match peers[2].network.inv_state { Some(ref mut inv_state) => { if inv_state.get_stats(&peer_1_nk).is_none() { @@ -656,12 +655,11 @@ fn test_get_blocks_and_microblocks_3_peers_push_available() { } } - let tip = SortitionDB::get_canonical_burn_chain_tip( - peers[0].sortdb.as_ref().unwrap().conn(), - ) - .unwrap(); + let tip = SortitionDB::get_canonical_burn_chain_tip(peers[0].sortdb_ref().conn()) + .unwrap(); let this_reward_cycle = peers[0] .config + .chain_config .burnchain .block_height_to_reward_cycle(tip.block_height) .unwrap(); @@ -860,12 +858,9 @@ fn push_block( dest ); - let sn = SortitionDB::get_block_snapshot_consensus( - peer.sortdb.as_ref().unwrap().conn(), - &consensus_hash, - ) - .unwrap() - .unwrap(); + let sn = SortitionDB::get_block_snapshot_consensus(peer.sortdb_ref().conn(), &consensus_hash) + .unwrap() + .unwrap(); let consensus_hash = sn.consensus_hash; let msg = StacksMessageType::Blocks(BlocksData { @@ -887,12 +882,9 @@ fn broadcast_block( block.block_hash(), ); - let sn = SortitionDB::get_block_snapshot_consensus( - peer.sortdb.as_ref().unwrap().conn(), - &consensus_hash, - ) - .unwrap() - .unwrap(); + let sn = SortitionDB::get_block_snapshot_consensus(peer.sortdb_ref().conn(), &consensus_hash) + .unwrap() + .unwrap(); let consensus_hash = sn.consensus_hash; let msg = StacksMessageType::Blocks(BlocksData { @@ -1091,12 +1083,11 @@ fn test_get_blocks_and_microblocks_2_peers_push_blocks_and_microblocks( } }, |num_blocks, ref mut peers| { - let tip = SortitionDB::get_canonical_burn_chain_tip( - peers[0].sortdb.as_ref().unwrap().conn(), - ) - .unwrap(); + let tip = SortitionDB::get_canonical_burn_chain_tip(peers[0].sortdb_ref().conn()) + .unwrap(); let this_reward_cycle = peers[0] .config + .chain_config .burnchain .block_height_to_reward_cycle(tip.block_height) .unwrap(); @@ -1104,12 +1095,12 @@ fn test_get_blocks_and_microblocks_2_peers_push_blocks_and_microblocks( // build up block data to replicate let mut block_data = vec![]; for _ in 0..num_blocks { - let tip = SortitionDB::get_canonical_burn_chain_tip( - peers[0].sortdb.as_ref().unwrap().conn(), - ) - .unwrap(); + let tip = + 
SortitionDB::get_canonical_burn_chain_tip(peers[0].sortdb_ref().conn()) + .unwrap(); if peers[0] .config + .chain_config .burnchain .block_height_to_reward_cycle(tip.block_height) .unwrap() @@ -1123,16 +1114,15 @@ fn test_get_blocks_and_microblocks_2_peers_push_blocks_and_microblocks( peers[0].next_burnchain_block(burn_ops.clone()); peers[0].process_stacks_epoch_at_tip(&stacks_block, µblocks); - TestPeer::set_ops_burn_header_hash(&mut burn_ops, &burn_header_hash); + TestChainstate::set_ops_burn_header_hash(&mut burn_ops, &burn_header_hash); for i in 1..peers.len() { peers[i].next_burnchain_block_raw(burn_ops.clone()); } - let sn = SortitionDB::get_canonical_burn_chain_tip( - peers[0].sortdb.as_ref().unwrap().conn(), - ) - .unwrap(); + let sn = + SortitionDB::get_canonical_burn_chain_tip(peers[0].sortdb_ref().conn()) + .unwrap(); block_data.push(( sn.consensus_hash.clone(), Some(stacks_block), @@ -1203,8 +1193,7 @@ fn test_get_blocks_and_microblocks_2_peers_push_blocks_and_microblocks( if let Some((consensus_hash, block, microblocks)) = data_to_push { test_debug!( - "Push block {}/{} and microblocks", - &consensus_hash, + "Push block {consensus_hash}/{} and microblocks", block.block_hash() ); @@ -1399,12 +1388,11 @@ fn test_get_blocks_and_microblocks_upload_blocks_http() { let peer_1 = peer_configs[1].to_neighbor(); }, |num_blocks, ref mut peers| { - let tip = SortitionDB::get_canonical_burn_chain_tip( - peers[0].sortdb.as_ref().unwrap().conn(), - ) - .unwrap(); + let tip = SortitionDB::get_canonical_burn_chain_tip(peers[0].sortdb_ref().conn()) + .unwrap(); let this_reward_cycle = peers[0] .config + .chain_config .burnchain .block_height_to_reward_cycle(tip.block_height) .unwrap(); @@ -1416,12 +1404,12 @@ fn test_get_blocks_and_microblocks_upload_blocks_http() { // cycle, since pushing block/microblock // announcements in reward cycles the remote // peer doesn't know about won't work. 
- let tip = SortitionDB::get_canonical_burn_chain_tip( - peers[0].sortdb.as_ref().unwrap().conn(), - ) - .unwrap(); + let tip = + SortitionDB::get_canonical_burn_chain_tip(peers[0].sortdb_ref().conn()) + .unwrap(); if peers[0] .config + .chain_config .burnchain .block_height_to_reward_cycle(tip.block_height) .unwrap() @@ -1436,16 +1424,15 @@ fn test_get_blocks_and_microblocks_upload_blocks_http() { peers[0].next_burnchain_block(burn_ops.clone()); peers[0].process_stacks_epoch_at_tip(&stacks_block, µblocks); - TestPeer::set_ops_burn_header_hash(&mut burn_ops, &burn_header_hash); + TestChainstate::set_ops_burn_header_hash(&mut burn_ops, &burn_header_hash); for i in 1..peers.len() { peers[i].next_burnchain_block_raw(burn_ops.clone()); } - let sn = SortitionDB::get_canonical_burn_chain_tip( - peers[0].sortdb.as_ref().unwrap().conn(), - ) - .unwrap(); + let sn = + SortitionDB::get_canonical_burn_chain_tip(peers[0].sortdb_ref().conn()) + .unwrap(); block_data.push(( sn.consensus_hash.clone(), Some(stacks_block), @@ -1595,20 +1582,28 @@ fn test_get_blocks_and_microblocks_2_peers_push_transactions() { let initial_balances = vec![ ( PrincipalData::from( - peer_configs[0].spending_account.origin_address().unwrap(), + peer_configs[0] + .chain_config + .spending_account + .origin_address() + .unwrap(), ), 1000000, ), ( PrincipalData::from( - peer_configs[1].spending_account.origin_address().unwrap(), + peer_configs[1] + .chain_config + .spending_account + .origin_address() + .unwrap(), ), 1000000, ), ]; - peer_configs[0].initial_balances = initial_balances.clone(); - peer_configs[1].initial_balances = initial_balances; + peer_configs[0].chain_config.initial_balances = initial_balances.clone(); + peer_configs[1].chain_config.initial_balances = initial_balances; let peer_0 = peer_configs[0].to_neighbor(); let peer_1 = peer_configs[1].to_neighbor(); @@ -1617,12 +1612,11 @@ fn test_get_blocks_and_microblocks_2_peers_push_transactions() { peer_configs[1].add_neighbor(&peer_0); }, |num_blocks, ref mut peers| { - let tip = SortitionDB::get_canonical_burn_chain_tip( - peers[0].sortdb.as_ref().unwrap().conn(), - ) - .unwrap(); + let tip = SortitionDB::get_canonical_burn_chain_tip(peers[0].sortdb_ref().conn()) + .unwrap(); let this_reward_cycle = peers[0] .config + .chain_config .burnchain .block_height_to_reward_cycle(tip.block_height) .unwrap(); @@ -1630,12 +1624,12 @@ fn test_get_blocks_and_microblocks_2_peers_push_transactions() { // build up block data to replicate let mut block_data = vec![]; for b in 0..num_blocks { - let tip = SortitionDB::get_canonical_burn_chain_tip( - peers[0].sortdb.as_ref().unwrap().conn(), - ) - .unwrap(); + let tip = + SortitionDB::get_canonical_burn_chain_tip(peers[0].sortdb_ref().conn()) + .unwrap(); if peers[0] .config + .chain_config .burnchain .block_height_to_reward_cycle(tip.block_height) .unwrap() @@ -1649,7 +1643,7 @@ fn test_get_blocks_and_microblocks_2_peers_push_transactions() { peers[0].next_burnchain_block(burn_ops.clone()); peers[0].process_stacks_epoch_at_tip(&stacks_block, µblocks); - TestPeer::set_ops_burn_header_hash(&mut burn_ops, &burn_header_hash); + TestChainstate::set_ops_burn_header_hash(&mut burn_ops, &burn_header_hash); for i in 1..peers.len() { peers[i].next_burnchain_block_raw(burn_ops.clone()); @@ -1659,10 +1653,9 @@ fn test_get_blocks_and_microblocks_2_peers_push_transactions() { } } - let sn = SortitionDB::get_canonical_burn_chain_tip( - peers[0].sortdb.as_ref().unwrap().conn(), - ) - .unwrap(); + let sn = + 
SortitionDB::get_canonical_burn_chain_tip(peers[0].sortdb_ref().conn()) + .unwrap(); block_data.push(( sn.consensus_hash.clone(), Some(stacks_block), @@ -1971,12 +1964,18 @@ fn test_get_blocks_and_microblocks_peers_broadcast() { } let initial_balances = vec![( - PrincipalData::from(peer_configs[0].spending_account.origin_address().unwrap()), + PrincipalData::from( + peer_configs[0] + .chain_config + .spending_account + .origin_address() + .unwrap(), + ), 1000000, )]; for i in 0..peer_configs.len() { - peer_configs[i].initial_balances = initial_balances.clone(); + peer_configs[i].chain_config.initial_balances = initial_balances.clone(); } // connectivity @@ -1988,12 +1987,11 @@ fn test_get_blocks_and_microblocks_peers_broadcast() { } }, |num_blocks, ref mut peers| { - let tip = SortitionDB::get_canonical_burn_chain_tip( - peers[0].sortdb.as_ref().unwrap().conn(), - ) - .unwrap(); + let tip = SortitionDB::get_canonical_burn_chain_tip(peers[0].sortdb_ref().conn()) + .unwrap(); let this_reward_cycle = peers[0] .config + .chain_config .burnchain .block_height_to_reward_cycle(tip.block_height) .unwrap(); @@ -2001,12 +1999,12 @@ fn test_get_blocks_and_microblocks_peers_broadcast() { // build up block data to replicate let mut block_data = vec![]; for _ in 0..num_blocks { - let tip = SortitionDB::get_canonical_burn_chain_tip( - peers[0].sortdb.as_ref().unwrap().conn(), - ) - .unwrap(); + let tip = + SortitionDB::get_canonical_burn_chain_tip(peers[0].sortdb_ref().conn()) + .unwrap(); if peers[0] .config + .chain_config .burnchain .block_height_to_reward_cycle(tip.block_height) .unwrap() @@ -2020,16 +2018,15 @@ fn test_get_blocks_and_microblocks_peers_broadcast() { peers[0].next_burnchain_block(burn_ops.clone()); peers[0].process_stacks_epoch_at_tip(&stacks_block, µblocks); - TestPeer::set_ops_burn_header_hash(&mut burn_ops, &burn_header_hash); + TestChainstate::set_ops_burn_header_hash(&mut burn_ops, &burn_header_hash); for i in 1..peers.len() { peers[i].next_burnchain_block_raw(burn_ops.clone()); } - let sn = SortitionDB::get_canonical_burn_chain_tip( - peers[0].sortdb.as_ref().unwrap().conn(), - ) - .unwrap(); + let sn = + SortitionDB::get_canonical_burn_chain_tip(peers[0].sortdb_ref().conn()) + .unwrap(); block_data.push(( sn.consensus_hash.clone(), @@ -2300,12 +2297,11 @@ fn test_get_blocks_and_microblocks_2_peers_antientropy() { peer_configs[1].add_neighbor(&peer_0); }, |num_blocks, ref mut peers| { - let tip = SortitionDB::get_canonical_burn_chain_tip( - peers[0].sortdb.as_ref().unwrap().conn(), - ) - .unwrap(); + let tip = SortitionDB::get_canonical_burn_chain_tip(peers[0].sortdb_ref().conn()) + .unwrap(); let this_reward_cycle = peers[0] .config + .chain_config .burnchain .block_height_to_reward_cycle(tip.block_height) .unwrap(); @@ -2313,12 +2309,12 @@ fn test_get_blocks_and_microblocks_2_peers_antientropy() { // build up block data to replicate let mut block_data = vec![]; for _ in 0..num_blocks { - let tip = SortitionDB::get_canonical_burn_chain_tip( - peers[0].sortdb.as_ref().unwrap().conn(), - ) - .unwrap(); + let tip = + SortitionDB::get_canonical_burn_chain_tip(peers[0].sortdb_ref().conn()) + .unwrap(); if peers[0] .config + .chain_config .burnchain .block_height_to_reward_cycle(tip.block_height) .unwrap() @@ -2332,16 +2328,15 @@ fn test_get_blocks_and_microblocks_2_peers_antientropy() { peers[0].next_burnchain_block(burn_ops.clone()); peers[0].process_stacks_epoch_at_tip(&stacks_block, µblocks); - TestPeer::set_ops_burn_header_hash(&mut burn_ops, &burn_header_hash); + 
TestChainstate::set_ops_burn_header_hash(&mut burn_ops, &burn_header_hash); for i in 1..peers.len() { peers[i].next_burnchain_block_raw(burn_ops.clone()); } - let sn = SortitionDB::get_canonical_burn_chain_tip( - peers[0].sortdb.as_ref().unwrap().conn(), - ) - .unwrap(); + let sn = + SortitionDB::get_canonical_burn_chain_tip(peers[0].sortdb_ref().conn()) + .unwrap(); block_data.push(( sn.consensus_hash.clone(), Some(stacks_block), @@ -2355,10 +2350,8 @@ fn test_get_blocks_and_microblocks_2_peers_antientropy() { for i in 1..peers.len() { peers[i].next_burnchain_block_raw(vec![]); } - let sn = SortitionDB::get_canonical_burn_chain_tip( - peers[0].sortdb.as_ref().unwrap().conn(), - ) - .unwrap(); + let sn = SortitionDB::get_canonical_burn_chain_tip(peers[0].sortdb_ref().conn()) + .unwrap(); block_data.push((sn.consensus_hash.clone(), None, None)); block_data @@ -2432,12 +2425,11 @@ fn test_get_blocks_and_microblocks_2_peers_buffered_messages() { peer_configs[1].add_neighbor(&peer_0); }, |num_blocks, ref mut peers| { - let tip = SortitionDB::get_canonical_burn_chain_tip( - peers[0].sortdb.as_ref().unwrap().conn(), - ) - .unwrap(); + let tip = SortitionDB::get_canonical_burn_chain_tip(peers[0].sortdb_ref().conn()) + .unwrap(); let this_reward_cycle = peers[0] .config + .chain_config .burnchain .block_height_to_reward_cycle(tip.block_height) .unwrap(); @@ -2445,17 +2437,16 @@ fn test_get_blocks_and_microblocks_2_peers_buffered_messages() { // build up block data to replicate let mut block_data = vec![]; for block_num in 0..num_blocks { - let tip = SortitionDB::get_canonical_burn_chain_tip( - peers[0].sortdb.as_ref().unwrap().conn(), - ) - .unwrap(); + let tip = + SortitionDB::get_canonical_burn_chain_tip(peers[0].sortdb_ref().conn()) + .unwrap(); let (mut burn_ops, stacks_block, microblocks) = peers[0].make_default_tenure(); let (_, burn_header_hash, consensus_hash) = peers[0].next_burnchain_block(burn_ops.clone()); peers[0].process_stacks_epoch_at_tip(&stacks_block, µblocks); - TestPeer::set_ops_burn_header_hash(&mut burn_ops, &burn_header_hash); + TestChainstate::set_ops_burn_header_hash(&mut burn_ops, &burn_header_hash); if block_num == 0 { for i in 1..peers.len() { @@ -2467,10 +2458,9 @@ fn test_get_blocks_and_microblocks_2_peers_buffered_messages() { all_sortitions.push(burn_ops.clone()); } - let sn = SortitionDB::get_canonical_burn_chain_tip( - peers[0].sortdb.as_ref().unwrap().conn(), - ) - .unwrap(); + let sn = + SortitionDB::get_canonical_burn_chain_tip(peers[0].sortdb_ref().conn()) + .unwrap(); block_data.push(( sn.consensus_hash.clone(), Some(stacks_block), @@ -2515,12 +2505,12 @@ fn test_get_blocks_and_microblocks_2_peers_buffered_messages() { debug!( "Push at {}, need {}", tip.anchored_header.height() - - peers[1].config.burnchain.first_block_height + - peers[1].config.chain_config.burnchain.first_block_height - 1, *pushed_i ); if tip.anchored_header.height() - - peers[1].config.burnchain.first_block_height + - peers[1].config.chain_config.burnchain.first_block_height - 1 == *pushed_i as u64 { @@ -2543,14 +2533,13 @@ fn test_get_blocks_and_microblocks_2_peers_buffered_messages() { *pushed_i += 1; } debug!( - "Sortition at {}, need {}", + "Sortition at {}, need {i}", tip.anchored_header.height() - - peers[1].config.burnchain.first_block_height - - 1, - *i + - peers[1].config.chain_config.burnchain.first_block_height + - 1 ); if tip.anchored_header.height() - - peers[1].config.burnchain.first_block_height + - peers[1].config.chain_config.burnchain.first_block_height - 1 == *i as u64 { 
@@ -2569,7 +2558,7 @@ fn test_get_blocks_and_microblocks_2_peers_buffered_messages() { for ((event_id, _neighbor_key), pending) in peers[1].network.pending_messages.iter() { - debug!("Pending at {} is ({}, {})", *i, event_id, pending.len()); + debug!("Pending at {i} is ({event_id}, {})", pending.len()); if !pending.is_empty() { update_sortition = true; } @@ -2732,8 +2721,8 @@ fn process_new_blocks_rejects_problematic_asts() { let initial_balances = vec![(addr.to_account_principal(), 100000000000)]; let mut peer_config = TestPeerConfig::new(function_name!(), 32019, 32020); - peer_config.initial_balances = initial_balances; - peer_config.epochs = Some(EpochList::new(&[ + peer_config.chain_config.initial_balances = initial_balances; + peer_config.chain_config.epochs = Some(EpochList::new(&[ StacksEpoch { epoch_id: StacksEpochId::Epoch20, start_height: 0, @@ -2749,18 +2738,17 @@ fn process_new_blocks_rejects_problematic_asts() { network_epoch: PEER_VERSION_EPOCH_2_05, }, ])); - let burnchain = peer_config.burnchain.clone(); + let burnchain = peer_config.chain_config.burnchain.clone(); // activate new AST rules right away let mut peer = TestPeer::new(peer_config); - let sortdb = peer.sortdb.take().unwrap(); - peer.sortdb = Some(sortdb); + let sortdb = peer.chain.sortdb.take().unwrap(); + peer.chain.sortdb = Some(sortdb); - let chainstate_path = peer.chainstate_path.clone(); + let chainstate_path = peer.chain.chainstate_path.clone(); let first_stacks_block_height = { - let sn = SortitionDB::get_canonical_burn_chain_tip(peer.sortdb.as_ref().unwrap().conn()) - .unwrap(); + let sn = SortitionDB::get_canonical_burn_chain_tip(peer.sortdb_ref().conn()).unwrap(); sn.block_height }; @@ -2775,8 +2763,7 @@ fn process_new_blocks_rejects_problematic_asts() { bytes.len() as u64 }; - let tip = - SortitionDB::get_canonical_burn_chain_tip(peer.sortdb.as_ref().unwrap().conn()).unwrap(); + let tip = SortitionDB::get_canonical_burn_chain_tip(peer.sortdb_ref().conn()).unwrap(); let mblock_privk = StacksPrivateKey::random(); @@ -2838,8 +2825,7 @@ fn process_new_blocks_rejects_problematic_asts() { let (_, _, consensus_hash) = peer.next_burnchain_block(burn_ops); peer.process_stacks_epoch(&block, &consensus_hash, &[]); - let tip = - SortitionDB::get_canonical_burn_chain_tip(peer.sortdb.as_ref().unwrap().conn()).unwrap(); + let tip = SortitionDB::get_canonical_burn_chain_tip(peer.sortdb_ref().conn()).unwrap(); let (burn_ops, bad_block, mut microblocks) = peer.make_tenure( |ref mut miner, @@ -3087,12 +3073,12 @@ fn process_new_blocks_rejects_problematic_asts() { .confirmed_microblocks .push((new_consensus_hash.clone(), vec![bad_mblock], 234)); - let mut sortdb = peer.sortdb.take().unwrap(); + let mut sortdb = peer.chain.sortdb.take().unwrap(); let (processed_blocks, processed_mblocks, relay_mblocks, bad_neighbors) = Relayer::process_new_blocks( &mut network_result, &mut sortdb, - &mut peer.stacks_node.as_mut().unwrap().chainstate, + &mut peer.chain.stacks_node.as_mut().unwrap().chainstate, None, ) .unwrap(); @@ -3107,7 +3093,7 @@ fn process_new_blocks_rejects_problematic_asts() { let txs_relayed = Relayer::process_transactions( &mut network_result, &sortdb, - &mut peer.stacks_node.as_mut().unwrap().chainstate, + &mut peer.chain.stacks_node.as_mut().unwrap().chainstate, peer.mempool.as_mut().unwrap(), None, ) @@ -3148,8 +3134,8 @@ fn test_block_pay_to_contract_gated_at_v210() { network_epoch: PEER_VERSION_EPOCH_2_1, }, ]); - peer_config.epochs = Some(epochs); - let burnchain = peer_config.burnchain.clone(); + 
peer_config.chain_config.epochs = Some(epochs); + let burnchain = peer_config.chain_config.burnchain.clone(); let mut peer = TestPeer::new(peer_config); @@ -3233,8 +3219,8 @@ fn test_block_pay_to_contract_gated_at_v210() { let (burn_ops, stacks_block, microblocks) = peer.make_tenure(&mut make_tenure); let (_, _, consensus_hash) = peer.next_burnchain_block(burn_ops.clone()); - let sortdb = peer.sortdb.take().unwrap(); - let mut node = peer.stacks_node.take().unwrap(); + let sortdb = peer.chain.sortdb.take().unwrap(); + let mut node = peer.chain.stacks_node.take().unwrap(); match Relayer::process_new_anchored_block( &sortdb.index_conn(), &mut node.chainstate, @@ -3247,11 +3233,11 @@ fn test_block_pay_to_contract_gated_at_v210() { } Err(chainstate_error::InvalidStacksBlock(_)) => {} Err(e) => { - panic!("Got unexpected error {:?}", &e); + panic!("Got unexpected error {e:?}"); } }; - peer.sortdb = Some(sortdb); - peer.stacks_node = Some(node); + peer.chain.sortdb = Some(sortdb); + peer.chain.stacks_node = Some(node); } // *now* it should succeed, since tenure 28 was in epoch 2.1 @@ -3259,8 +3245,8 @@ fn test_block_pay_to_contract_gated_at_v210() { let (_, _, consensus_hash) = peer.next_burnchain_block(burn_ops); - let sortdb = peer.sortdb.take().unwrap(); - let mut node = peer.stacks_node.take().unwrap(); + let sortdb = peer.chain.sortdb.take().unwrap(); + let mut node = peer.chain.stacks_node.take().unwrap(); match Relayer::process_new_anchored_block( &sortdb.index_conn(), &mut node.chainstate, @@ -3279,8 +3265,8 @@ fn test_block_pay_to_contract_gated_at_v210() { panic!("Got unexpected error {:?}", &e); } }; - peer.sortdb = Some(sortdb); - peer.stacks_node = Some(node); + peer.chain.sortdb = Some(sortdb); + peer.chain.stacks_node = Some(node); } #[test] @@ -3288,7 +3274,13 @@ fn test_block_versioned_smart_contract_gated_at_v210() { let mut peer_config = TestPeerConfig::new(function_name!(), 4248, 4249); let initial_balances = vec![( - PrincipalData::from(peer_config.spending_account.origin_address().unwrap()), + PrincipalData::from( + peer_config + .chain_config + .spending_account + .origin_address() + .unwrap(), + ), 1000000, )]; @@ -3323,9 +3315,9 @@ fn test_block_versioned_smart_contract_gated_at_v210() { }, ]); - peer_config.epochs = Some(epochs); - peer_config.initial_balances = initial_balances; - let burnchain = peer_config.burnchain.clone(); + peer_config.chain_config.epochs = Some(epochs); + peer_config.chain_config.initial_balances = initial_balances; + let burnchain = peer_config.chain_config.burnchain.clone(); let mut peer = TestPeer::new(peer_config); @@ -3412,8 +3404,8 @@ fn test_block_versioned_smart_contract_gated_at_v210() { let (burn_ops, stacks_block, microblocks) = peer.make_tenure(&mut make_tenure); let (_, _, consensus_hash) = peer.next_burnchain_block(burn_ops.clone()); - let sortdb = peer.sortdb.take().unwrap(); - let mut node = peer.stacks_node.take().unwrap(); + let sortdb = peer.chain.sortdb.take().unwrap(); + let mut node = peer.chain.stacks_node.take().unwrap(); match Relayer::process_new_anchored_block( &sortdb.index_conn(), &mut node.chainstate, @@ -3422,16 +3414,16 @@ fn test_block_versioned_smart_contract_gated_at_v210() { 123, ) { Ok(x) => { - eprintln!("{:?}", &stacks_block); + eprintln!("{stacks_block:?}"); panic!("Stored pay-to-contract stacks block before epoch 2.1"); } Err(chainstate_error::InvalidStacksBlock(_)) => {} Err(e) => { - panic!("Got unexpected error {:?}", &e); + panic!("Got unexpected error {e:?}"); } }; - peer.sortdb = Some(sortdb); - 
peer.stacks_node = Some(node); + peer.chain.sortdb = Some(sortdb); + peer.chain.stacks_node = Some(node); } // *now* it should succeed, since tenure 28 was in epoch 2.1 @@ -3439,28 +3431,23 @@ fn test_block_versioned_smart_contract_gated_at_v210() { let (_, _, consensus_hash) = peer.next_burnchain_block(burn_ops); - let sortdb = peer.sortdb.take().unwrap(); - let mut node = peer.stacks_node.take().unwrap(); - match Relayer::process_new_anchored_block( + let sortdb = peer.chain.sortdb.take().unwrap(); + let mut node = peer.chain.stacks_node.take().unwrap(); + let x = Relayer::process_new_anchored_block( &sortdb.index_conn(), &mut node.chainstate, &consensus_hash, &stacks_block, 123, - ) { - Ok(x) => { - assert_eq!( - x, - BlockAcceptResponse::Accepted, - "Failed to process valid versioned smart contract block" - ); - } - Err(e) => { - panic!("Got unexpected error {:?}", &e); - } - }; - peer.sortdb = Some(sortdb); - peer.stacks_node = Some(node); + ) + .unwrap_or_else(|e| panic!("Got unexpected error {e:?}")); + assert_eq!( + x, + BlockAcceptResponse::Accepted, + "Failed to process valid versioned smart contract block" + ); + peer.chain.sortdb = Some(sortdb); + peer.chain.stacks_node = Some(node); } #[test] @@ -3468,7 +3455,13 @@ fn test_block_versioned_smart_contract_mempool_rejection_until_v210() { let mut peer_config = TestPeerConfig::new(function_name!(), 4250, 4251); let initial_balances = vec![( - PrincipalData::from(peer_config.spending_account.origin_address().unwrap()), + PrincipalData::from( + peer_config + .chain_config + .spending_account + .origin_address() + .unwrap(), + ), 1000000, )]; @@ -3503,9 +3496,9 @@ fn test_block_versioned_smart_contract_mempool_rejection_until_v210() { }, ]); - peer_config.epochs = Some(epochs); - peer_config.initial_balances = initial_balances; - let burnchain = peer_config.burnchain.clone(); + peer_config.chain_config.epochs = Some(epochs); + peer_config.chain_config.initial_balances = initial_balances; + let burnchain = peer_config.chain_config.burnchain.clone(); let mut peer = TestPeer::new(peer_config); let versioned_contract_opt: RefCell> = RefCell::new(None); @@ -3599,8 +3592,8 @@ fn test_block_versioned_smart_contract_mempool_rejection_until_v210() { let (burn_ops, stacks_block, microblocks) = peer.make_tenure(&mut make_tenure); let (_, _, consensus_hash) = peer.next_burnchain_block(burn_ops.clone()); - let sortdb = peer.sortdb.take().unwrap(); - let mut node = peer.stacks_node.take().unwrap(); + let sortdb = peer.chain.sortdb.take().unwrap(); + let mut node = peer.chain.stacks_node.take().unwrap(); // the empty block should be accepted match Relayer::process_new_anchored_block( @@ -3623,7 +3616,7 @@ fn test_block_versioned_smart_contract_mempool_rejection_until_v210() { }; // process it - peer.coord.handle_new_stacks_block().unwrap(); + peer.chain.coord.handle_new_stacks_block().unwrap(); // the mempool would reject a versioned contract transaction, since we're not yet at // tenure 28 @@ -3648,16 +3641,16 @@ fn test_block_versioned_smart_contract_mempool_rejection_until_v210() { } }; - peer.sortdb = Some(sortdb); - peer.stacks_node = Some(node); + peer.chain.sortdb = Some(sortdb); + peer.chain.stacks_node = Some(node); } // *now* it should succeed, since tenure 28 was in epoch 2.1 let (burn_ops, stacks_block, microblocks) = peer.make_tenure(&mut make_tenure); let (_, _, consensus_hash) = peer.next_burnchain_block(burn_ops); - let sortdb = peer.sortdb.take().unwrap(); - let mut node = peer.stacks_node.take().unwrap(); + let sortdb = 
peer.chain.sortdb.take().unwrap(); + let mut node = peer.chain.stacks_node.take().unwrap(); let tip = SortitionDB::get_canonical_burn_chain_tip(sortdb.conn()).unwrap(); match Relayer::process_new_anchored_block( @@ -3680,7 +3673,7 @@ fn test_block_versioned_smart_contract_mempool_rejection_until_v210() { }; // process it - peer.coord.handle_new_stacks_block().unwrap(); + peer.chain.coord.handle_new_stacks_block().unwrap(); // the mempool would accept a versioned contract transaction, since we're not yet at // tenure 28 @@ -3696,8 +3689,8 @@ fn test_block_versioned_smart_contract_mempool_rejection_until_v210() { panic!("will_admit_mempool_tx {:?}", &e); }; - peer.sortdb = Some(sortdb); - peer.stacks_node = Some(node); + peer.chain.sortdb = Some(sortdb); + peer.chain.stacks_node = Some(node); } // TODO: process bans diff --git a/stackslib/src/net/tests/relay/nakamoto.rs b/stackslib/src/net/tests/relay/nakamoto.rs index f1c5fdf39fc..0196406f6aa 100644 --- a/stackslib/src/net/tests/relay/nakamoto.rs +++ b/stackslib/src/net/tests/relay/nakamoto.rs @@ -33,6 +33,7 @@ use crate::chainstate::nakamoto::NakamotoBlockHeader; use crate::chainstate::stacks::test::{make_codec_test_block, make_codec_test_microblock}; use crate::chainstate::stacks::tests::TestStacksNode; use crate::chainstate::stacks::*; +use crate::chainstate::tests::TestChainstate; use crate::core::*; use crate::net::relay::{AcceptedNakamotoBlocks, ProcessedNetReceipts, Relayer}; use crate::net::stackerdb::StackerDBs; @@ -60,13 +61,13 @@ impl ExitedPeer { Self { config: peer.config, network: peer.network, - sortdb: peer.sortdb, - miner: peer.miner, - stacks_node: peer.stacks_node, + sortdb: peer.chain.sortdb, + miner: peer.chain.miner, + stacks_node: peer.chain.stacks_node, relayer: peer.relayer, mempool: peer.mempool, - chainstate_path: peer.chainstate_path, - indexer: peer.indexer, + chainstate_path: peer.chain.chainstate_path, + indexer: peer.chain.indexer, } } @@ -91,7 +92,7 @@ impl ExitedPeer { ibd, 100, &RPCHandlerArgs::default(), - self.config.txindex, + self.config.chain_config.txindex, )?; let receipts_res = self.relayer.process_network_result( self.network.get_local_peer(), @@ -180,8 +181,8 @@ impl SeedNode { ) .unwrap(); - let mut test_signers = peer.config.test_signers.take().unwrap(); - let test_stackers = peer.config.test_stackers.take().unwrap(); + let mut test_signers = peer.config.chain_config.test_signers.take().unwrap(); + let test_stackers = peer.config.chain_config.test_stackers.take().unwrap(); let mut all_blocks: Vec = vec![]; @@ -208,9 +209,10 @@ impl SeedNode { tenure_change.burn_view_consensus_hash = consensus_hash.clone(); let tenure_change_tx = peer + .chain .miner .make_nakamoto_tenure_change(tenure_change.clone()); - let coinbase_tx = peer.miner.make_nakamoto_coinbase(None, vrf_proof); + let coinbase_tx = peer.chain.miner.make_nakamoto_coinbase(None, vrf_proof); let num_blocks: usize = (thread_rng().gen::() % 10) + 1; @@ -264,8 +266,8 @@ impl SeedNode { // relay these blocks let local_peer = peer.network.get_local_peer().clone(); - let sortdb = peer.sortdb.take().unwrap(); - let stacks_node = peer.stacks_node.take().unwrap(); + let sortdb = peer.chain.sortdb.take().unwrap(); + let stacks_node = peer.chain.stacks_node.take().unwrap(); peer.relayer.relay_epoch3_blocks( &local_peer, @@ -276,8 +278,8 @@ impl SeedNode { }], ); - peer.sortdb = Some(sortdb); - peer.stacks_node = Some(stacks_node); + peer.chain.sortdb = Some(sortdb); + peer.chain.stacks_node = Some(stacks_node); // send the blocks to the unit test 
as well if comms @@ -291,11 +293,12 @@ impl SeedNode { // if we're starting a new reward cycle, then save the current one let tip = { - let sort_db = peer.sortdb.as_mut().unwrap(); + let sort_db = peer.chain.sortdb.as_mut().unwrap(); SortitionDB::get_canonical_burn_chain_tip(sort_db.conn()).unwrap() }; if peer .config + .chain_config .burnchain .is_reward_cycle_start(tip.block_height) { @@ -305,8 +308,8 @@ impl SeedNode { all_blocks.append(&mut blocks); } - peer.config.test_signers = Some(test_signers); - peer.config.test_stackers = Some(test_stackers); + peer.config.chain_config.test_signers = Some(test_signers); + peer.config.chain_config.test_stackers = Some(test_stackers); let exited_peer = ExitedPeer::from_test_peer(peer); @@ -534,7 +537,7 @@ fn test_no_buffer_ready_nakamoto_blocks() { let peer_nk = peer.to_neighbor().addr; let mut follower = followers.pop().unwrap(); - let test_path = TestPeer::make_test_path(&follower.config); + let test_path = TestChainstate::make_test_path(&follower.config.chain_config); let stackerdb_path = format!("{}/stacker_db.sqlite", &test_path); let follower_stacker_dbs = StackerDBs::connect(&stackerdb_path, true).unwrap(); let mut follower_relayer = Relayer::from_p2p(&mut follower.network, follower_stacker_dbs); @@ -570,8 +573,8 @@ fn test_no_buffer_ready_nakamoto_blocks() { Some(SeedData::Blocks(blocks)) => { debug!("Follower got Nakamoto blocks {:?}", &blocks); - let mut sortdb = follower.sortdb.take().unwrap(); - let mut node = follower.stacks_node.take().unwrap(); + let mut sortdb = follower.chain.sortdb.take().unwrap(); + let mut node = follower.chain.stacks_node.take().unwrap(); let tip = SortitionDB::get_canonical_burn_chain_tip(sortdb.conn()).unwrap(); @@ -728,8 +731,8 @@ fn test_no_buffer_ready_nakamoto_blocks() { )); } - follower.stacks_node = Some(node); - follower.sortdb = Some(sortdb); + follower.chain.stacks_node = Some(node); + follower.chain.sortdb = Some(sortdb); } Some(SeedData::Exit(exited)) => { debug!("Follower got seed exit"); @@ -739,20 +742,24 @@ fn test_no_buffer_ready_nakamoto_blocks() { } } - follower.coord.handle_new_burnchain_block().unwrap(); - follower.coord.handle_new_stacks_block().unwrap(); - follower.coord.handle_new_nakamoto_stacks_block().unwrap(); + follower.chain.coord.handle_new_burnchain_block().unwrap(); + follower.chain.coord.handle_new_stacks_block().unwrap(); + follower + .chain + .coord + .handle_new_nakamoto_stacks_block() + .unwrap(); } // compare chain tips - let sortdb = follower.sortdb.take().unwrap(); - let stacks_node = follower.stacks_node.take().unwrap(); + let sortdb = follower.chain.sortdb.take().unwrap(); + let stacks_node = follower.chain.stacks_node.take().unwrap(); let follower_burn_tip = SortitionDB::get_canonical_burn_chain_tip(sortdb.conn()).unwrap(); let follower_stacks_tip = NakamotoChainState::get_canonical_block_header(stacks_node.chainstate.db(), &sortdb) .unwrap(); - follower.stacks_node = Some(stacks_node); - follower.sortdb = Some(sortdb); + follower.chain.stacks_node = Some(stacks_node); + follower.chain.sortdb = Some(sortdb); let mut exited_peer = exited_peer.unwrap(); let sortdb = exited_peer.sortdb.take().unwrap(); @@ -785,7 +792,7 @@ fn test_buffer_nonready_nakamoto_blocks() { let peer_nk = peer.to_neighbor().addr; let mut follower = followers.pop().unwrap(); - let test_path = TestPeer::make_test_path(&follower.config); + let test_path = TestChainstate::make_test_path(&follower.config.chain_config); let stackerdb_path = format!("{}/stacker_db.sqlite", &test_path); let 
follower_stacker_dbs = StackerDBs::connect(&stackerdb_path, true).unwrap(); let mut follower_relayer = Relayer::from_p2p(&mut follower.network, follower_stacker_dbs); @@ -850,8 +857,8 @@ fn test_buffer_nonready_nakamoto_blocks() { debug!("Follower got Nakamoto blocks {:?}", &blocks); all_blocks.push(blocks.clone()); - let sortdb = follower.sortdb.take().unwrap(); - let node = follower.stacks_node.take().unwrap(); + let sortdb = follower.chain.sortdb.take().unwrap(); + let node = follower.chain.stacks_node.take().unwrap(); // we will need to buffer this since the sortition for these blocks hasn't been // processed yet @@ -908,8 +915,8 @@ fn test_buffer_nonready_nakamoto_blocks() { true, ); - follower.stacks_node = Some(node); - follower.sortdb = Some(sortdb); + follower.chain.stacks_node = Some(node); + follower.chain.sortdb = Some(sortdb); } Some(SeedData::Exit(exited)) => { debug!("Follower got seed exit"); @@ -928,8 +935,8 @@ fn test_buffer_nonready_nakamoto_blocks() { } // process the last buffered messages - let mut sortdb = follower.sortdb.take().unwrap(); - let mut node = follower.stacks_node.take().unwrap(); + let mut sortdb = follower.chain.sortdb.take().unwrap(); + let mut node = follower.chain.stacks_node.take().unwrap(); if let Some(mut network_result) = network_result.take() { follower_relayer.process_new_epoch3_blocks( @@ -943,8 +950,8 @@ fn test_buffer_nonready_nakamoto_blocks() { ); } - follower.stacks_node = Some(node); - follower.sortdb = Some(sortdb); + follower.chain.stacks_node = Some(node); + follower.chain.sortdb = Some(sortdb); network_result = follower .step_with_ibd_and_dns(true, Some(&mut follower_dns_client)) @@ -957,8 +964,8 @@ fn test_buffer_nonready_nakamoto_blocks() { } if let Some(mut network_result) = network_result.take() { - let mut sortdb = follower.sortdb.take().unwrap(); - let mut node = follower.stacks_node.take().unwrap(); + let mut sortdb = follower.chain.sortdb.take().unwrap(); + let mut node = follower.chain.stacks_node.take().unwrap(); let num_processed = follower_relayer.process_new_epoch3_blocks( follower.network.get_local_peer(), &mut network_result, @@ -969,24 +976,28 @@ fn test_buffer_nonready_nakamoto_blocks() { None, ); info!("Processed {} unsolicited Nakamoto blocks", num_processed); - follower.stacks_node = Some(node); - follower.sortdb = Some(sortdb); + follower.chain.stacks_node = Some(node); + follower.chain.sortdb = Some(sortdb); } - follower.coord.handle_new_burnchain_block().unwrap(); - follower.coord.handle_new_stacks_block().unwrap(); - follower.coord.handle_new_nakamoto_stacks_block().unwrap(); + follower.chain.coord.handle_new_burnchain_block().unwrap(); + follower.chain.coord.handle_new_stacks_block().unwrap(); + follower + .chain + .coord + .handle_new_nakamoto_stacks_block() + .unwrap(); } // compare chain tips - let sortdb = follower.sortdb.take().unwrap(); - let stacks_node = follower.stacks_node.take().unwrap(); + let sortdb = follower.chain.sortdb.take().unwrap(); + let stacks_node = follower.chain.stacks_node.take().unwrap(); let follower_burn_tip = SortitionDB::get_canonical_burn_chain_tip(sortdb.conn()).unwrap(); let follower_stacks_tip = NakamotoChainState::get_canonical_block_header(stacks_node.chainstate.db(), &sortdb) .unwrap(); - follower.stacks_node = Some(stacks_node); - follower.sortdb = Some(sortdb); + follower.chain.stacks_node = Some(stacks_node); + follower.chain.sortdb = Some(sortdb); let mut exited_peer = exited_peer.unwrap(); let sortdb = exited_peer.sortdb.take().unwrap(); @@ -1024,7 +1035,7 @@ fn 
test_nakamoto_boot_node_from_block_push() { let peer_nk = peer.to_neighbor().addr; let mut follower = followers.pop().unwrap(); - let test_path = TestPeer::make_test_path(&follower.config); + let test_path = TestChainstate::make_test_path(&follower.config.chain_config); let stackerdb_path = format!("{}/stacker_db.sqlite", &test_path); let follower_stacker_dbs = StackerDBs::connect(&stackerdb_path, true).unwrap(); @@ -1074,9 +1085,13 @@ fn test_nakamoto_boot_node_from_block_push() { } } - follower.coord.handle_new_burnchain_block().unwrap(); - follower.coord.handle_new_stacks_block().unwrap(); - follower.coord.handle_new_nakamoto_stacks_block().unwrap(); + follower.chain.coord.handle_new_burnchain_block().unwrap(); + follower.chain.coord.handle_new_stacks_block().unwrap(); + follower + .chain + .coord + .handle_new_nakamoto_stacks_block() + .unwrap(); } // recover exited peer and get its chain tips @@ -1100,8 +1115,8 @@ fn test_nakamoto_boot_node_from_block_push() { .unwrap(); // compare chain tips - let sortdb = follower.sortdb.take().unwrap(); - let stacks_node = follower.stacks_node.take().unwrap(); + let sortdb = follower.chain.sortdb.take().unwrap(); + let stacks_node = follower.chain.stacks_node.take().unwrap(); let follower_burn_tip = SortitionDB::get_canonical_burn_chain_tip(sortdb.conn()).unwrap(); let follower_stacks_tip = NakamotoChainState::get_canonical_block_header( @@ -1109,8 +1124,8 @@ fn test_nakamoto_boot_node_from_block_push() { &sortdb, ) .unwrap(); - follower.stacks_node = Some(stacks_node); - follower.sortdb = Some(sortdb); + follower.chain.stacks_node = Some(stacks_node); + follower.chain.sortdb = Some(sortdb); debug!("{}: Follower sortition tip: {:?}", i, &follower_burn_tip); debug!("{}: Seed sortition tip: {:?}", i, &exited_peer_burn_tip);
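The tests in these hunks repeatedly move the sortition and chainstate handles out of the peer with `take()`, operate on them, and then store them back (e.g. `peer.chain.sortdb.take().unwrap()` ... `peer.chain.sortdb = Some(sortdb)`), so that the peer is not mutably borrowed while its databases are in use. Below is a minimal, self-contained sketch of that ownership pattern using stand-in types; `Peer`, `Chain`, and `SortDb` here are illustrative placeholders only, not the real `TestPeer`, `TestChainstate`, or `SortitionDB`.

// Stand-in types for illustration; the real tests use TestPeer / TestChainstate / SortitionDB.
struct SortDb {
    height: u64,
}

struct Chain {
    // Held in an Option so it can be temporarily moved out, mirroring
    // `peer.chain.sortdb.take().unwrap()` in the tests above.
    sortdb: Option<SortDb>,
}

struct Peer {
    chain: Chain,
}

fn advance(peer: &mut Peer) {
    // Move the handle out of the peer so it can be used by value...
    let mut sortdb = peer.chain.sortdb.take().expect("sortdb must be present");

    // ...operate on it without also holding a borrow of `peer`...
    sortdb.height += 1;

    // ...and put it back so the next step can take it again.
    peer.chain.sortdb = Some(sortdb);
}

fn main() {
    let mut peer = Peer {
        chain: Chain {
            sortdb: Some(SortDb { height: 0 }),
        },
    };
    advance(&mut peer);
    advance(&mut peer);
    assert_eq!(peer.chain.sortdb.as_ref().unwrap().height, 2);
}

Keeping the handle in an `Option` and taking it out for the duration of a step avoids simultaneously holding a mutable borrow of the peer and of its database, which is why the take/restore pairs appear around nearly every block-processing call in these tests.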