Skip to content
Open
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension


Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
2 changes: 1 addition & 1 deletion go.mod
Original file line number Diff line number Diff line change
Expand Up @@ -12,7 +12,7 @@ require (
cosmossdk.io/math v1.5.3
github.com/AlecAivazis/survey/v2 v2.3.7
github.com/DataDog/zstd v1.5.7
github.com/LumeraProtocol/lumera v1.11.2-0.20260331140230-4aeb5d0d7a89
github.com/LumeraProtocol/lumera v1.11.2-0.20260413145614-4ffe74bb13dc
github.com/LumeraProtocol/rq-go v0.2.1
github.com/btcsuite/btcutil v1.0.3-0.20201208143702-a53e38424cce
github.com/cenkalti/backoff/v4 v4.3.0
Expand Down
4 changes: 2 additions & 2 deletions go.sum
Original file line number Diff line number Diff line change
Expand Up @@ -111,8 +111,8 @@ github.com/GoogleCloudPlatform/opentelemetry-operations-go/exporter/metric v0.50
github.com/GoogleCloudPlatform/opentelemetry-operations-go/internal/resourcemapping v0.50.0 h1:ig/FpDD2JofP/NExKQUbn7uOSZzJAQqogfqluZK4ed4=
github.com/GoogleCloudPlatform/opentelemetry-operations-go/internal/resourcemapping v0.50.0/go.mod h1:otE2jQekW/PqXk1Awf5lmfokJx4uwuqcj1ab5SpGeW0=
github.com/Knetic/govaluate v3.0.1-0.20171022003610-9aa49832a739+incompatible/go.mod h1:r7JcOSlj0wfOMncg0iLm8Leh48TZaKVeNIfJntJ2wa0=
github.com/LumeraProtocol/lumera v1.11.2-0.20260331140230-4aeb5d0d7a89 h1:wDZnZ5wi4l0qyMufE3bOQImu1BF/igMAsxr6aMWRmp4=
github.com/LumeraProtocol/lumera v1.11.2-0.20260331140230-4aeb5d0d7a89/go.mod h1:p2sZZG3bLzSBdaW883qjuU3DXXY4NJzTTwLywr8uI0w=
github.com/LumeraProtocol/lumera v1.11.2-0.20260413145614-4ffe74bb13dc h1:B43KT06s/4lE/LyVQevb0Xr5XqKy6nlel1fZh7G7w14=
github.com/LumeraProtocol/lumera v1.11.2-0.20260413145614-4ffe74bb13dc/go.mod h1:p2sZZG3bLzSBdaW883qjuU3DXXY4NJzTTwLywr8uI0w=
github.com/LumeraProtocol/rq-go v0.2.1 h1:8B3UzRChLsGMmvZ+UVbJsJj6JZzL9P9iYxbdUwGsQI4=
github.com/LumeraProtocol/rq-go v0.2.1/go.mod h1:APnKCZRh1Es2Vtrd2w4kCLgAyaL5Bqrkz/BURoRJ+O8=
github.com/Masterminds/semver/v3 v3.3.1 h1:QtNSWtVZ3nBfk8mAOu/B6v7FMJ+NHTIgUPi7rj+4nv4=
Expand Down
241 changes: 241 additions & 0 deletions pkg/cascadekit/commitment.go
Original file line number Diff line number Diff line change
@@ -0,0 +1,241 @@
package cascadekit

import (
"fmt"
"io"
"os"

"github.com/LumeraProtocol/lumera/x/action/v1/merkle"
actiontypes "github.com/LumeraProtocol/lumera/x/action/v1/types"
"lukechampine.com/blake3"
)

const (
	// DefaultChunkSize is the default chunk size for LEP-5 commitment (256 KiB).
	// SelectChunkSize starts here and only ever halves, so this is also the
	// largest chunk size ever produced.
	DefaultChunkSize = 262144
	// MinChunkSize is the minimum allowed chunk size in bytes.
	MinChunkSize = 1
	// MaxChunkSize is the maximum allowed chunk size in bytes (equal to
	// DefaultChunkSize); VerifyCommitmentRoot rejects commitments outside
	// [MinChunkSize, MaxChunkSize].
	MaxChunkSize = 262144
	// MinTotalSize is the minimum file size in bytes accepted for an LEP-5
	// commitment; BuildCommitmentFromFile rejects smaller files.
	MinTotalSize = 4
	// CommitmentType is the commitment type constant for LEP-5, stored
	// verbatim in AvailabilityCommitment.CommitmentType.
	CommitmentType = "lep5/chunk-merkle/v1"
)

// SelectChunkSize returns the optimal chunk size for a given file size and
// minimum chunk count. Starting from DefaultChunkSize, the candidate size is
// halved until the file would split into at least minChunks chunks, or the
// size reaches MinChunkSize, whichever comes first.
func SelectChunkSize(fileSize int64, minChunks uint32) uint32 {
	size := uint32(DefaultChunkSize)
	for size > MinChunkSize && numChunks(fileSize, size) < minChunks {
		size >>= 1
	}
	return size
}

// numChunks reports how many chunkSize-sized chunks are needed to cover
// fileSize bytes, i.e. ceil(fileSize / chunkSize).
func numChunks(fileSize int64, chunkSize uint32) uint32 {
	whole, rem := fileSize/int64(chunkSize), fileSize%int64(chunkSize)
	if rem != 0 {
		whole++
	}
	return uint32(whole)
}

// ChunkFile reads a file and returns its chunks using the given chunk size.
// The final chunk may be shorter than chunkSize; every returned chunk is an
// independent copy of the data, not a view into a shared buffer.
func ChunkFile(path string, chunkSize uint32) ([][]byte, error) {
	f, err := os.Open(path)
	if err != nil {
		return nil, fmt.Errorf("open file: %w", err)
	}
	defer f.Close()

	info, err := f.Stat()
	if err != nil {
		return nil, fmt.Errorf("stat file: %w", err)
	}

	// Pre-size the result: ceil(size / chunkSize) chunks.
	capHint := info.Size() / int64(chunkSize)
	if info.Size()%int64(chunkSize) != 0 {
		capHint++
	}
	chunks := make([][]byte, 0, capHint)

	buf := make([]byte, chunkSize)
	for {
		n, readErr := io.ReadFull(f, buf)
		if n > 0 {
			// Copy out of the reusable read buffer.
			chunks = append(chunks, append([]byte(nil), buf[:n]...))
		}
		switch readErr {
		case nil:
			continue
		case io.EOF, io.ErrUnexpectedEOF:
			// EOF: clean end on a chunk boundary; ErrUnexpectedEOF: short
			// final chunk. Both terminate the read loop successfully.
			return chunks, nil
		default:
			return nil, fmt.Errorf("read chunk: %w", readErr)
		}
	}
}

// BuildCommitmentFromFile constructs an AvailabilityCommitment for a file.
// It chunks the file, builds a Merkle tree, and generates challenge indices.
// challengeCount and minChunks are the SVC parameters from the chain.
// Files smaller than MinTotalSize, or files that cannot produce minChunks
// chunks even at MinChunkSize, are rejected.
func BuildCommitmentFromFile(filePath string, challengeCount, minChunks uint32) (*actiontypes.AvailabilityCommitment, *merkle.Tree, error) {
	info, err := os.Stat(filePath)
	if err != nil {
		return nil, nil, fmt.Errorf("stat file: %w", err)
	}

	size := info.Size()
	if size < MinTotalSize {
		return nil, nil, fmt.Errorf("file too small: %d bytes (minimum %d)", size, MinTotalSize)
	}

	chunkSize := SelectChunkSize(size, minChunks)
	count := numChunks(size, chunkSize)
	if count < minChunks {
		return nil, nil, fmt.Errorf("file produces %d chunks, need at least %d", count, minChunks)
	}

	chunks, err := ChunkFile(filePath, chunkSize)
	if err != nil {
		return nil, nil, err
	}

	tree, err := merkle.BuildTree(chunks)
	if err != nil {
		return nil, nil, fmt.Errorf("build merkle tree: %w", err)
	}

	// Cap the challenge count at the number of chunks, then derive indices
	// deterministically using the tree root as entropy.
	challenges := challengeCount
	if challenges > count {
		challenges = count
	}
	indices := deriveSimpleIndices(tree.Root[:], count, challenges)

	return &actiontypes.AvailabilityCommitment{
		CommitmentType:   CommitmentType,
		HashAlgo:         actiontypes.HashAlgo_HASH_ALGO_BLAKE3,
		ChunkSize:        chunkSize,
		TotalSize:        uint64(size),
		NumChunks:        count,
		Root:             tree.Root[:],
		ChallengeIndices: indices,
	}, tree, nil
}
Comment on lines +80 to +127

// GenerateChunkProofs produces Merkle proofs for the challenge indices in the
// commitment. Proofs are returned in the same order as indices; any proof
// generation failure aborts the whole batch.
func GenerateChunkProofs(tree *merkle.Tree, indices []uint32) ([]*actiontypes.ChunkProof, error) {
	proofs := make([]*actiontypes.ChunkProof, 0, len(indices))
	for _, idx := range indices {
		proof, err := tree.GenerateProof(int(idx))
		if err != nil {
			return nil, fmt.Errorf("generate proof for chunk %d: %w", idx, err)
		}

		// Flatten the fixed-size path hashes into byte slices for the proto type.
		path := make([][]byte, len(proof.PathHashes))
		for j := range proof.PathHashes {
			path[j] = proof.PathHashes[j][:]
		}

		proofs = append(proofs, &actiontypes.ChunkProof{
			ChunkIndex:     proof.ChunkIndex,
			LeafHash:       proof.LeafHash[:],
			PathHashes:     path,
			PathDirections: proof.PathDirections,
		})
	}
	return proofs, nil
}

// VerifyCommitmentRoot rebuilds the Merkle tree from a file and checks it matches the on-chain root.
func VerifyCommitmentRoot(filePath string, commitment *actiontypes.AvailabilityCommitment) (*merkle.Tree, error) {
if commitment == nil {
return nil, nil // pre-LEP-5 action, nothing to verify
}
if commitment.ChunkSize < MinChunkSize || commitment.ChunkSize > MaxChunkSize {
return nil, fmt.Errorf("invalid chunk size in commitment: %d", commitment.ChunkSize)
}
if commitment.NumChunks == 0 {
return nil, fmt.Errorf("invalid num_chunks in commitment: %d", commitment.NumChunks)
}
if len(commitment.Root) != merkle.HashSize {
return nil, fmt.Errorf("invalid root length in commitment: got %d, expected %d", len(commitment.Root), merkle.HashSize)
}

chunks, err := ChunkFile(filePath, commitment.ChunkSize)
if err != nil {
return nil, fmt.Errorf("chunk file for verification: %w", err)
}

if uint32(len(chunks)) != commitment.NumChunks {
return nil, fmt.Errorf("chunk count mismatch: got %d, expected %d", len(chunks), commitment.NumChunks)
}
Comment on lines +154 to +175

tree, err := merkle.BuildTree(chunks)
if err != nil {
return nil, fmt.Errorf("build merkle tree for verification: %w", err)
}

var expectedRoot [merkle.HashSize]byte
copy(expectedRoot[:], commitment.Root)
if tree.Root != expectedRoot {
return nil, fmt.Errorf("merkle root mismatch: computed %x, expected %x", tree.Root[:], commitment.Root)
}

return tree, nil
}

// deriveSimpleIndices generates m unique indices in [0, numChunks) using BLAKE3(root || counter).
func deriveSimpleIndices(root []byte, numChunks, m uint32) []uint32 {
if numChunks == 0 || m == 0 {
return nil
}

indices := make([]uint32, 0, m)
used := make(map[uint32]struct{}, m)
Comment on lines +197 to +198
counter := uint32(0)

// Allocate once and only overwrite the counter bytes per iteration.
buf := make([]byte, len(root)+4)
copy(buf, root)

// Guard against pathological runtimes from pure rejection sampling when m ~= numChunks.
maxAttempts := numChunks * 32
if maxAttempts < m {
maxAttempts = m
}

for uint32(len(indices)) < m && counter < maxAttempts {
// BLAKE3(root || uint32be(counter))
buf[len(root)] = byte(counter >> 24)
buf[len(root)+1] = byte(counter >> 16)
buf[len(root)+2] = byte(counter >> 8)
buf[len(root)+3] = byte(counter)

h := blake3.Sum256(buf)
// Use first 8 bytes as uint64 mod numChunks
val := uint64(h[0])<<56 | uint64(h[1])<<48 | uint64(h[2])<<40 | uint64(h[3])<<32 |
uint64(h[4])<<24 | uint64(h[5])<<16 | uint64(h[6])<<8 | uint64(h[7])
idx := uint32(val % uint64(numChunks))

if _, exists := used[idx]; !exists {
used[idx] = struct{}{}
indices = append(indices, idx)
}
counter++
}

// Deterministic fallback: fill any missing indices in ascending order.
for idx := uint32(0); uint32(len(indices)) < m && idx < numChunks; idx++ {
if _, exists := used[idx]; exists {
continue
}
used[idx] = struct{}{}
indices = append(indices, idx)
}

return indices
}
105 changes: 105 additions & 0 deletions pkg/cascadekit/commitment_test.go
Original file line number Diff line number Diff line change
@@ -0,0 +1,105 @@
package cascadekit

import (
"os"
"path/filepath"
"strings"
"testing"
)

// writeTempFile writes data to a fresh file inside a test-scoped temporary
// directory and returns its path; the directory is removed automatically
// when the test finishes.
func writeTempFile(t *testing.T, data []byte) string {
	t.Helper()
	path := filepath.Join(t.TempDir(), "file.bin")
	if err := os.WriteFile(path, data, 0o600); err != nil {
		t.Fatalf("write temp file: %v", err)
	}
	return path
}

// TestVerifyCommitmentRoot_Valid checks the round trip: a commitment built
// from a file must verify against that same, unmodified file.
func TestVerifyCommitmentRoot_Valid(t *testing.T) {
	path := writeTempFile(t, []byte("hello lep5 commitment"))

	commitment, _, err := BuildCommitmentFromFile(path, 8, 4)
	if err != nil {
		t.Fatalf("build commitment: %v", err)
	}

	if _, err = VerifyCommitmentRoot(path, commitment); err != nil {
		t.Fatalf("verify commitment root: %v", err)
	}
}

// TestVerifyCommitmentRoot_RejectsInvalidRootLength verifies that a root of
// the wrong length is rejected before any Merkle rebuilding happens.
func TestVerifyCommitmentRoot_RejectsInvalidRootLength(t *testing.T) {
	path := writeTempFile(t, []byte("hello lep5 commitment"))

	commitment, _, err := BuildCommitmentFromFile(path, 8, 4)
	if err != nil {
		t.Fatalf("build commitment: %v", err)
	}

	// Corrupt the root so its length no longer matches the hash size.
	commitment.Root = []byte{1, 2, 3}

	if _, err = VerifyCommitmentRoot(path, commitment); err == nil || !strings.Contains(err.Error(), "invalid root length") {
		t.Fatalf("expected invalid root length error, got: %v", err)
	}
}

// TestVerifyCommitmentRoot_RejectsInvalidChunkSize verifies that a zero chunk
// size (below MinChunkSize) fails commitment validation.
func TestVerifyCommitmentRoot_RejectsInvalidChunkSize(t *testing.T) {
	path := writeTempFile(t, []byte("hello lep5 commitment"))

	commitment, _, err := BuildCommitmentFromFile(path, 8, 4)
	if err != nil {
		t.Fatalf("build commitment: %v", err)
	}

	// Zero is below MinChunkSize and must be rejected.
	commitment.ChunkSize = 0

	if _, err = VerifyCommitmentRoot(path, commitment); err == nil || !strings.Contains(err.Error(), "invalid chunk size") {
		t.Fatalf("expected invalid chunk size error, got: %v", err)
	}
}

// TestDeriveSimpleIndices_DeterministicAndUnique checks that two runs with the
// same seed produce identical output, and that the indices are unique and in
// range.
func TestDeriveSimpleIndices_DeterministicAndUnique(t *testing.T) {
	root := []byte("fixed-root-seed")

	first := deriveSimpleIndices(root, 16, 8)
	second := deriveSimpleIndices(root, 16, 8)

	if len(first) != 8 || len(second) != 8 {
		t.Fatalf("unexpected lengths: %d, %d", len(first), len(second))
	}
	for i, idx := range first {
		if idx != second[i] {
			t.Fatalf("non-deterministic output at %d: %d != %d", i, idx, second[i])
		}
	}

	seen := make(map[uint32]struct{}, len(first))
	for _, idx := range first {
		if idx >= 16 {
			t.Fatalf("index out of range: %d", idx)
		}
		if _, dup := seen[idx]; dup {
			t.Fatalf("duplicate index: %d", idx)
		}
		seen[idx] = struct{}{}
	}
}

// TestDeriveSimpleIndices_CoversAllWhenMEqualsNumChunks verifies that asking
// for as many indices as there are chunks yields every index exactly once.
func TestDeriveSimpleIndices_CoversAllWhenMEqualsNumChunks(t *testing.T) {
	root := []byte("another-fixed-root-seed")
	total := uint32(7)

	got := deriveSimpleIndices(root, total, total)
	if len(got) != int(total) {
		t.Fatalf("expected %d indices, got %d", total, len(got))
	}

	unique := make(map[uint32]struct{}, len(got))
	for _, idx := range got {
		if idx >= total {
			t.Fatalf("index out of range: %d", idx)
		}
		unique[idx] = struct{}{}
	}
	if len(unique) != int(total) {
		t.Fatalf("expected full coverage of [0,%d), got %d unique indices", total, len(unique))
	}
}
9 changes: 7 additions & 2 deletions pkg/cascadekit/metadata.go
Original file line number Diff line number Diff line change
Expand Up @@ -6,12 +6,17 @@ import (

// NewCascadeMetadata creates a types.CascadeMetadata for RequestAction.
// The keeper will populate rq_ids_max; rq_ids_ids is for FinalizeAction only.
func NewCascadeMetadata(dataHashB64, fileName string, rqIdsIc uint64, indexSignatureFormat string, public bool) actiontypes.CascadeMetadata {
return actiontypes.CascadeMetadata{
// commitment may be nil for pre-LEP-5 actions.
func NewCascadeMetadata(dataHashB64, fileName string, rqIdsIc uint64, indexSignatureFormat string, public bool, commitment *actiontypes.AvailabilityCommitment) actiontypes.CascadeMetadata {
meta := actiontypes.CascadeMetadata{
DataHash: dataHashB64,
FileName: fileName,
RqIdsIc: rqIdsIc,
Signatures: indexSignatureFormat,
Public: public,
}
if commitment != nil {
meta.AvailabilityCommitment = commitment
}
return meta
}
2 changes: 1 addition & 1 deletion pkg/cascadekit/request_builder.go
Original file line number Diff line number Diff line change
Expand Up @@ -18,6 +18,6 @@ func BuildCascadeRequest(layout codec.Layout, fileBytes []byte, fileName string,
if err != nil {
return actiontypes.CascadeMetadata{}, nil, err
}
meta := NewCascadeMetadata(dataHashB64, fileName, uint64(ic), indexSignatureFormat, public)
meta := NewCascadeMetadata(dataHashB64, fileName, uint64(ic), indexSignatureFormat, public, nil)
return meta, indexIDs, nil
}
Loading
Loading