From 88aa055360efdab585e971abf70c556d0fccc02b Mon Sep 17 00:00:00 2001 From: Randy Grok Date: Tue, 18 Nov 2025 00:07:43 +0100 Subject: [PATCH 01/35] feat(da): add Celestia blob API client Add native Celestia blob API client using celestia-node's blob API. - Client with JSON-RPC connection to celestia-node - Native types (Namespace, Blob, Commitment, Proof) - Stub methods (Submit, Get, GetAll, GetProof, Included) - Unit tests for types and client --- da/celestia/client.go | 107 +++++++++++++++++++++++++++ da/celestia/client_test.go | 145 +++++++++++++++++++++++++++++++++++++ da/celestia/types.go | 52 +++++++++++++ da/celestia/types_test.go | 87 ++++++++++++++++++++++ 4 files changed, 391 insertions(+) create mode 100644 da/celestia/client.go create mode 100644 da/celestia/client_test.go create mode 100644 da/celestia/types.go create mode 100644 da/celestia/types_test.go diff --git a/da/celestia/client.go b/da/celestia/client.go new file mode 100644 index 0000000000..63b6587d32 --- /dev/null +++ b/da/celestia/client.go @@ -0,0 +1,107 @@ +package celestia + +import ( + "context" + "fmt" + "net/http" + + "github.com/filecoin-project/go-jsonrpc" + "github.com/rs/zerolog" +) + +// Client connects to celestia-node's blob API via JSON-RPC. +type Client struct { + logger zerolog.Logger + maxBlobSize uint64 + closer jsonrpc.ClientCloser + + Internal struct { + Submit func(ctx context.Context, blobs []*Blob, opts *SubmitOptions) (uint64, error) `perm:"write"` + Get func(ctx context.Context, height uint64, ns Namespace, c Commitment) (*Blob, error) `perm:"read"` + GetAll func(ctx context.Context, height uint64, namespaces []Namespace) ([]*Blob, error) `perm:"read"` + GetProof func(ctx context.Context, height uint64, ns Namespace, c Commitment) (*Proof, error) `perm:"read"` + Included func(ctx context.Context, height uint64, ns Namespace, proof *Proof, c Commitment) (bool, error) `perm:"read"` + } +} + +// NewClient creates a new client connected to celestia-node. +// Token is obtained from: celestia light auth write +func NewClient( + ctx context.Context, + logger zerolog.Logger, + addr string, + token string, + maxBlobSize uint64, +) (*Client, error) { + if addr == "" { + return nil, fmt.Errorf("address cannot be empty") + } + + if maxBlobSize == 0 { + return nil, fmt.Errorf("maxBlobSize must be greater than 0") + } + + client := &Client{ + logger: logger, + maxBlobSize: maxBlobSize, + } + + authHeader := http.Header{} + if token != "" { + authHeader.Set("Authorization", fmt.Sprintf("Bearer %s", token)) + } + + closer, err := jsonrpc.NewMergeClient( + ctx, + addr, + "blob", + []interface{}{&client.Internal}, + authHeader, + ) + if err != nil { + return nil, fmt.Errorf("failed to create JSON-RPC client: %w", err) + } + + client.closer = closer + + logger.Info(). + Str("address", addr). + Uint64("max_blob_size", maxBlobSize). + Msg("Celestia blob API client created successfully") + + return client, nil +} + +// Close closes the connection. Safe to call multiple times. +func (c *Client) Close() { + if c.closer != nil { + c.closer() + c.closer = nil + } + c.logger.Debug().Msg("Celestia client connection closed") +} + +// Submit submits blobs to Celestia and returns the height at which they were included. +func (c *Client) Submit(ctx context.Context, blobs []*Blob, opts *SubmitOptions) (uint64, error) { + return 0, fmt.Errorf("not implemented yet") +} + +// Get retrieves a single blob by commitment at a given height and namespace. +func (c *Client) Get(ctx context.Context, height uint64, namespace Namespace, commitment Commitment) (*Blob, error) { + return nil, fmt.Errorf("not implemented yet") +} + +// GetAll retrieves all blobs at a given height for the specified namespaces. +func (c *Client) GetAll(ctx context.Context, height uint64, namespaces []Namespace) ([]*Blob, error) { + return nil, fmt.Errorf("not implemented yet") +} + +// GetProof retrieves the inclusion proof for a blob. +func (c *Client) GetProof(ctx context.Context, height uint64, namespace Namespace, commitment Commitment) (*Proof, error) { + return nil, fmt.Errorf("not implemented yet") +} + +// Included checks whether a blob is included in the Celestia block. +func (c *Client) Included(ctx context.Context, height uint64, namespace Namespace, proof *Proof, commitment Commitment) (bool, error) { + return false, fmt.Errorf("not implemented yet") +} diff --git a/da/celestia/client_test.go b/da/celestia/client_test.go new file mode 100644 index 0000000000..4d97cd2074 --- /dev/null +++ b/da/celestia/client_test.go @@ -0,0 +1,145 @@ +package celestia + +import ( + "context" + "testing" + + "github.com/rs/zerolog" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" +) + +func TestNewClient(t *testing.T) { + logger := zerolog.Nop() + ctx := context.Background() + + tests := []struct { + name string + addr string + token string + maxBlobSize uint64 + wantErr bool + errContains string + }{ + { + name: "valid parameters", + addr: "http://localhost:26658", + token: "test-token", + maxBlobSize: 1024 * 1024, + wantErr: false, + }, + { + name: "valid parameters without token", + addr: "http://localhost:26658", + token: "", + maxBlobSize: 1024 * 1024, + wantErr: false, + }, + { + name: "empty address", + addr: "", + token: "test-token", + maxBlobSize: 1024, + wantErr: true, + errContains: "address cannot be empty", + }, + { + name: "zero maxBlobSize", + addr: "http://localhost:26658", + token: "test-token", + maxBlobSize: 0, + wantErr: true, + errContains: "maxBlobSize must be greater than 0", + }, + { + name: "invalid address will fail on connection", + addr: "not-a-valid-url", + token: "test-token", + maxBlobSize: 1024, + wantErr: true, + errContains: "failed to create JSON-RPC client", + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + client, err := NewClient(ctx, logger, tt.addr, tt.token, tt.maxBlobSize) + + if tt.wantErr { + require.Error(t, err) + if tt.errContains != "" { + assert.Contains(t, err.Error(), tt.errContains) + } + assert.Nil(t, client) + } else { + require.NoError(t, err) + require.NotNil(t, client) + assert.Equal(t, tt.maxBlobSize, client.maxBlobSize) + assert.NotNil(t, client.closer) + + // Clean up + client.Close() + } + }) + } +} + +func TestClient_Close(t *testing.T) { + logger := zerolog.Nop() + ctx := context.Background() + + client, err := NewClient(ctx, logger, "http://localhost:26658", "token", 1024*1024) + require.NoError(t, err) + require.NotNil(t, client) + + // Should not panic + assert.NotPanics(t, func() { + client.Close() + }) + + // Should be safe to call multiple times + assert.NotPanics(t, func() { + client.Close() + }) +} + +func TestClient_BlobAPIMethods(t *testing.T) { + logger := zerolog.Nop() + ctx := context.Background() + + client, err := NewClient(ctx, logger, "http://localhost:26658", "token", 1024*1024) + require.NoError(t, err) + require.NotNil(t, client) + defer client.Close() + + // Test that all native blob API methods exist and return "not implemented" for now + t.Run("Submit", func(t *testing.T) { + _, err := client.Submit(ctx, nil, nil) + assert.Error(t, err) + assert.Contains(t, err.Error(), "not implemented") + }) + + t.Run("Get", func(t *testing.T) { + _, err := client.Get(ctx, 0, nil, nil) + assert.Error(t, err) + assert.Contains(t, err.Error(), "not implemented") + }) + + t.Run("GetAll", func(t *testing.T) { + _, err := client.GetAll(ctx, 0, nil) + assert.Error(t, err) + assert.Contains(t, err.Error(), "not implemented") + }) + + t.Run("GetProof", func(t *testing.T) { + _, err := client.GetProof(ctx, 0, nil, nil) + assert.Error(t, err) + assert.Contains(t, err.Error(), "not implemented") + }) + + t.Run("Included", func(t *testing.T) { + _, err := client.Included(ctx, 0, nil, nil, nil) + assert.Error(t, err) + assert.Contains(t, err.Error(), "not implemented") + }) +} diff --git a/da/celestia/types.go b/da/celestia/types.go new file mode 100644 index 0000000000..496a151474 --- /dev/null +++ b/da/celestia/types.go @@ -0,0 +1,52 @@ +package celestia + +import ( + "encoding/json" + "fmt" +) + +// Namespace represents a Celestia namespace (29 bytes: 1 version + 28 ID) +type Namespace []byte + +// Commitment represents a blob commitment (merkle root) +type Commitment []byte + +// Blob represents a Celestia blob with namespace and commitment +type Blob struct { + Namespace Namespace `json:"namespace"` + Data []byte `json:"data"` + ShareVer uint32 `json:"share_version"` + Commitment Commitment `json:"commitment"` + Index int `json:"index"` +} + +// Proof represents a Celestia inclusion proof +type Proof struct { + Data []byte `json:"data"` +} + +// SubmitOptions contains options for blob submission +type SubmitOptions struct { + Fee float64 `json:"fee,omitempty"` + GasLimit uint64 `json:"gas_limit,omitempty"` + SignerAddress string `json:"signer_address,omitempty"` +} + +// MarshalJSON implements json.Marshaler for Proof +func (p *Proof) MarshalJSON() ([]byte, error) { + return json.Marshal(p.Data) +} + +// UnmarshalJSON implements json.Unmarshaler for Proof +func (p *Proof) UnmarshalJSON(data []byte) error { + return json.Unmarshal(data, &p.Data) +} + +// ValidateNamespace validates that a namespace is properly formatted (29 bytes). +func ValidateNamespace(ns Namespace) error { + const NamespaceSize = 29 + if len(ns) != NamespaceSize { + return fmt.Errorf("invalid namespace size: got %d, expected %d", len(ns), NamespaceSize) + } + return nil +} diff --git a/da/celestia/types_test.go b/da/celestia/types_test.go new file mode 100644 index 0000000000..a4587587c0 --- /dev/null +++ b/da/celestia/types_test.go @@ -0,0 +1,87 @@ +package celestia + +import ( + "encoding/json" + "testing" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" +) + +func TestValidateNamespace(t *testing.T) { + tests := []struct { + name string + namespace Namespace + wantErr bool + }{ + { + name: "valid namespace (29 bytes)", + namespace: make([]byte, 29), + wantErr: false, + }, + { + name: "invalid namespace too short", + namespace: make([]byte, 10), + wantErr: true, + }, + { + name: "invalid namespace too long", + namespace: make([]byte, 30), + wantErr: true, + }, + { + name: "invalid namespace empty", + namespace: []byte{}, + wantErr: true, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + err := ValidateNamespace(tt.namespace) + if tt.wantErr { + require.Error(t, err) + } else { + require.NoError(t, err) + } + }) + } +} + +func TestProofJSONMarshaling(t *testing.T) { + proof := &Proof{ + Data: []byte{1, 2, 3, 4, 5}, + } + + // Marshal + data, err := json.Marshal(proof) + require.NoError(t, err) + + // Unmarshal + var decoded Proof + err = json.Unmarshal(data, &decoded) + require.NoError(t, err) + + assert.Equal(t, proof.Data, decoded.Data) +} + +func TestSubmitOptionsJSON(t *testing.T) { + opts := &SubmitOptions{ + Fee: 0.002, + GasLimit: 100000, + SignerAddress: "celestia1abc123", + } + + // Marshal + data, err := json.Marshal(opts) + require.NoError(t, err) + + // Unmarshal + var decoded SubmitOptions + err = json.Unmarshal(data, &decoded) + require.NoError(t, err) + + assert.Equal(t, opts.Fee, decoded.Fee) + assert.Equal(t, opts.GasLimit, decoded.GasLimit) + assert.Equal(t, opts.SignerAddress, decoded.SignerAddress) +} From f9dc74611b1562fa527dfc017d2908a5421ae5ce Mon Sep 17 00:00:00 2001 From: Randy Grok Date: Tue, 18 Nov 2025 00:12:45 +0100 Subject: [PATCH 02/35] feat(da): implement blob.Submit with simplified validation Implement Submit method for Celestia blob API client. Validation is delegated to celestia-node rather than duplicating checks in the client layer. --- da/celestia/client.go | 20 +++++++++++- da/celestia/client_test.go | 63 +++++++++++++++++++++++++++++++++----- 2 files changed, 74 insertions(+), 9 deletions(-) diff --git a/da/celestia/client.go b/da/celestia/client.go index 63b6587d32..a8a59eb3a1 100644 --- a/da/celestia/client.go +++ b/da/celestia/client.go @@ -83,7 +83,25 @@ func (c *Client) Close() { // Submit submits blobs to Celestia and returns the height at which they were included. func (c *Client) Submit(ctx context.Context, blobs []*Blob, opts *SubmitOptions) (uint64, error) { - return 0, fmt.Errorf("not implemented yet") + c.logger.Debug(). + Int("num_blobs", len(blobs)). + Msg("Submitting blobs to Celestia") + + height, err := c.Internal.Submit(ctx, blobs, opts) + if err != nil { + c.logger.Error(). + Err(err). + Int("num_blobs", len(blobs)). + Msg("Failed to submit blobs") + return 0, fmt.Errorf("failed to submit blobs: %w", err) + } + + c.logger.Info(). + Uint64("height", height). + Int("num_blobs", len(blobs)). + Msg("Successfully submitted blobs") + + return height, nil } // Get retrieves a single blob by commitment at a given height and namespace. diff --git a/da/celestia/client_test.go b/da/celestia/client_test.go index 4d97cd2074..ceccc7e98b 100644 --- a/da/celestia/client_test.go +++ b/da/celestia/client_test.go @@ -103,7 +103,61 @@ func TestClient_Close(t *testing.T) { }) } -func TestClient_BlobAPIMethods(t *testing.T) { +func TestClient_Submit(t *testing.T) { + logger := zerolog.Nop() + ctx := context.Background() + + validNamespace := make([]byte, 29) + validBlob := &Blob{ + Namespace: validNamespace, + Data: []byte("test data"), + } + + tests := []struct { + name string + blobs []*Blob + wantRPC bool + }{ + { + name: "single blob", + blobs: []*Blob{validBlob}, + wantRPC: true, + }, + { + name: "multiple blobs", + blobs: []*Blob{ + validBlob, + { + Namespace: validNamespace, + Data: []byte("more data"), + }, + }, + wantRPC: true, + }, + { + name: "empty list delegates to celestia-node", + blobs: []*Blob{}, + wantRPC: true, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + client, err := NewClient(ctx, logger, "http://localhost:26658", "token", 1024*1024) + require.NoError(t, err) + defer client.Close() + + _, err = client.Submit(ctx, tt.blobs, nil) + + if tt.wantRPC { + require.Error(t, err) + assert.Contains(t, err.Error(), "failed to submit blobs") + } + }) + } +} + +func TestClient_UnimplementedMethods(t *testing.T) { logger := zerolog.Nop() ctx := context.Background() @@ -112,13 +166,6 @@ func TestClient_BlobAPIMethods(t *testing.T) { require.NotNil(t, client) defer client.Close() - // Test that all native blob API methods exist and return "not implemented" for now - t.Run("Submit", func(t *testing.T) { - _, err := client.Submit(ctx, nil, nil) - assert.Error(t, err) - assert.Contains(t, err.Error(), "not implemented") - }) - t.Run("Get", func(t *testing.T) { _, err := client.Get(ctx, 0, nil, nil) assert.Error(t, err) From acf4ceebf831cc62be21f00050c78c4a1b8279e0 Mon Sep 17 00:00:00 2001 From: Randy Grok Date: Tue, 18 Nov 2025 00:15:25 +0100 Subject: [PATCH 03/35] feat(da): implement remaining blob API methods Implement Get, GetAll, GetProof, and Included methods for the Celestia blob API client. All methods delegate to celestia-node via JSON-RPC with appropriate logging. --- da/celestia/client.go | 82 ++++++++++++++++++++++++++++++++++++-- da/celestia/client_test.go | 75 ++++++++++++++++++++++++---------- 2 files changed, 131 insertions(+), 26 deletions(-) diff --git a/da/celestia/client.go b/da/celestia/client.go index a8a59eb3a1..3a1923f3d5 100644 --- a/da/celestia/client.go +++ b/da/celestia/client.go @@ -106,20 +106,94 @@ func (c *Client) Submit(ctx context.Context, blobs []*Blob, opts *SubmitOptions) // Get retrieves a single blob by commitment at a given height and namespace. func (c *Client) Get(ctx context.Context, height uint64, namespace Namespace, commitment Commitment) (*Blob, error) { - return nil, fmt.Errorf("not implemented yet") + c.logger.Debug(). + Uint64("height", height). + Msg("Getting blob from Celestia") + + blob, err := c.Internal.Get(ctx, height, namespace, commitment) + if err != nil { + c.logger.Error(). + Err(err). + Uint64("height", height). + Msg("Failed to get blob") + return nil, fmt.Errorf("failed to get blob: %w", err) + } + + c.logger.Debug(). + Uint64("height", height). + Int("data_size", len(blob.Data)). + Msg("Successfully retrieved blob") + + return blob, nil } // GetAll retrieves all blobs at a given height for the specified namespaces. func (c *Client) GetAll(ctx context.Context, height uint64, namespaces []Namespace) ([]*Blob, error) { - return nil, fmt.Errorf("not implemented yet") + c.logger.Debug(). + Uint64("height", height). + Int("num_namespaces", len(namespaces)). + Msg("Getting all blobs from Celestia") + + blobs, err := c.Internal.GetAll(ctx, height, namespaces) + if err != nil { + c.logger.Error(). + Err(err). + Uint64("height", height). + Int("num_namespaces", len(namespaces)). + Msg("Failed to get blobs") + return nil, fmt.Errorf("failed to get blobs: %w", err) + } + + c.logger.Debug(). + Uint64("height", height). + Int("num_blobs", len(blobs)). + Msg("Successfully retrieved blobs") + + return blobs, nil } // GetProof retrieves the inclusion proof for a blob. func (c *Client) GetProof(ctx context.Context, height uint64, namespace Namespace, commitment Commitment) (*Proof, error) { - return nil, fmt.Errorf("not implemented yet") + c.logger.Debug(). + Uint64("height", height). + Msg("Getting proof from Celestia") + + proof, err := c.Internal.GetProof(ctx, height, namespace, commitment) + if err != nil { + c.logger.Error(). + Err(err). + Uint64("height", height). + Msg("Failed to get proof") + return nil, fmt.Errorf("failed to get proof: %w", err) + } + + c.logger.Debug(). + Uint64("height", height). + Int("proof_size", len(proof.Data)). + Msg("Successfully retrieved proof") + + return proof, nil } // Included checks whether a blob is included in the Celestia block. func (c *Client) Included(ctx context.Context, height uint64, namespace Namespace, proof *Proof, commitment Commitment) (bool, error) { - return false, fmt.Errorf("not implemented yet") + c.logger.Debug(). + Uint64("height", height). + Msg("Checking blob inclusion in Celestia") + + included, err := c.Internal.Included(ctx, height, namespace, proof, commitment) + if err != nil { + c.logger.Error(). + Err(err). + Uint64("height", height). + Msg("Failed to check inclusion") + return false, fmt.Errorf("failed to check inclusion: %w", err) + } + + c.logger.Debug(). + Uint64("height", height). + Bool("included", included). + Msg("Inclusion check completed") + + return included, nil } diff --git a/da/celestia/client_test.go b/da/celestia/client_test.go index ceccc7e98b..ee56c528e4 100644 --- a/da/celestia/client_test.go +++ b/da/celestia/client_test.go @@ -157,36 +157,67 @@ func TestClient_Submit(t *testing.T) { } } -func TestClient_UnimplementedMethods(t *testing.T) { +func TestClient_Get(t *testing.T) { logger := zerolog.Nop() ctx := context.Background() client, err := NewClient(ctx, logger, "http://localhost:26658", "token", 1024*1024) require.NoError(t, err) - require.NotNil(t, client) defer client.Close() - t.Run("Get", func(t *testing.T) { - _, err := client.Get(ctx, 0, nil, nil) - assert.Error(t, err) - assert.Contains(t, err.Error(), "not implemented") - }) + validNamespace := make([]byte, 29) + validCommitment := []byte("commitment") - t.Run("GetAll", func(t *testing.T) { - _, err := client.GetAll(ctx, 0, nil) - assert.Error(t, err) - assert.Contains(t, err.Error(), "not implemented") - }) + _, err = client.Get(ctx, 100, validNamespace, validCommitment) + require.Error(t, err) + assert.Contains(t, err.Error(), "failed to get blob") +} - t.Run("GetProof", func(t *testing.T) { - _, err := client.GetProof(ctx, 0, nil, nil) - assert.Error(t, err) - assert.Contains(t, err.Error(), "not implemented") - }) +func TestClient_GetAll(t *testing.T) { + logger := zerolog.Nop() + ctx := context.Background() - t.Run("Included", func(t *testing.T) { - _, err := client.Included(ctx, 0, nil, nil, nil) - assert.Error(t, err) - assert.Contains(t, err.Error(), "not implemented") - }) + client, err := NewClient(ctx, logger, "http://localhost:26658", "token", 1024*1024) + require.NoError(t, err) + defer client.Close() + + validNamespace := make([]byte, 29) + namespaces := []Namespace{validNamespace} + + _, err = client.GetAll(ctx, 100, namespaces) + require.Error(t, err) + assert.Contains(t, err.Error(), "failed to get blobs") +} + +func TestClient_GetProof(t *testing.T) { + logger := zerolog.Nop() + ctx := context.Background() + + client, err := NewClient(ctx, logger, "http://localhost:26658", "token", 1024*1024) + require.NoError(t, err) + defer client.Close() + + validNamespace := make([]byte, 29) + validCommitment := []byte("commitment") + + _, err = client.GetProof(ctx, 100, validNamespace, validCommitment) + require.Error(t, err) + assert.Contains(t, err.Error(), "failed to get proof") +} + +func TestClient_Included(t *testing.T) { + logger := zerolog.Nop() + ctx := context.Background() + + client, err := NewClient(ctx, logger, "http://localhost:26658", "token", 1024*1024) + require.NoError(t, err) + defer client.Close() + + validNamespace := make([]byte, 29) + validCommitment := []byte("commitment") + proof := &Proof{Data: []byte("proof")} + + _, err = client.Included(ctx, 100, validNamespace, proof, validCommitment) + require.Error(t, err) + assert.Contains(t, err.Error(), "failed to check inclusion") } From 5ab44eb1ce73228ff6de8e79d78d97b815550b93 Mon Sep 17 00:00:00 2001 From: Randy Grok Date: Tue, 18 Nov 2025 00:20:49 +0100 Subject: [PATCH 04/35] feat(da): add DA adapter for Celestia blob API Add temporary adapter that implements da.DA interface using the native Celestia blob API client. This allows ev-node to use the new Celestia client while maintaining compatibility with existing code. The adapter bridges the gap between: - Celestia's height-based blob API - ev-node's ID-based DA abstraction IDs are encoded as [height (8 bytes)][commitment] for compatibility. --- da/celestia/adapter.go | 255 ++++++++++++++++++++++++++++++++++++ da/celestia/adapter_test.go | 142 ++++++++++++++++++++ 2 files changed, 397 insertions(+) create mode 100644 da/celestia/adapter.go create mode 100644 da/celestia/adapter_test.go diff --git a/da/celestia/adapter.go b/da/celestia/adapter.go new file mode 100644 index 0000000000..472df22283 --- /dev/null +++ b/da/celestia/adapter.go @@ -0,0 +1,255 @@ +package celestia + +import ( + "context" + "encoding/binary" + "encoding/json" + "fmt" + "strings" + "time" + + "github.com/evstack/ev-node/core/da" + "github.com/rs/zerolog" +) + +// Adapter wraps the Celestia client to implement the da.DA interface. +// This is a temporary bridge to allow ev-node to use the native Celestia blob API +// while maintaining compatibility with the existing DA abstraction. +type Adapter struct { + client *Client + logger zerolog.Logger + maxBlobSize uint64 +} + +// NewAdapter creates a new adapter that implements da.DA interface. +func NewAdapter( + ctx context.Context, + logger zerolog.Logger, + addr string, + token string, + maxBlobSize uint64, +) (*Adapter, error) { + client, err := NewClient(ctx, logger, addr, token, maxBlobSize) + if err != nil { + return nil, err + } + + return &Adapter{ + client: client, + logger: logger, + maxBlobSize: maxBlobSize, + }, nil +} + +// Close closes the underlying client connection. +func (a *Adapter) Close() { + a.client.Close() +} + +// Submit submits blobs to Celestia and returns IDs. +func (a *Adapter) Submit(ctx context.Context, blobs []da.Blob, gasPrice float64, namespace []byte) ([]da.ID, error) { + return a.SubmitWithOptions(ctx, blobs, gasPrice, namespace, nil) +} + +// SubmitWithOptions submits blobs to Celestia with additional options. +func (a *Adapter) SubmitWithOptions(ctx context.Context, blobs []da.Blob, gasPrice float64, namespace []byte, options []byte) ([]da.ID, error) { + if len(blobs) == 0 { + return []da.ID{}, nil + } + + // Validate namespace + if err := ValidateNamespace(namespace); err != nil { + return nil, fmt.Errorf("invalid namespace: %w", err) + } + + // Convert blobs to Celestia format + celestiaBlobs := make([]*Blob, len(blobs)) + for i, blob := range blobs { + celestiaBlobs[i] = &Blob{ + Namespace: namespace, + Data: blob, + } + } + + // Parse submit options if provided + var opts *SubmitOptions + if len(options) > 0 { + opts = &SubmitOptions{} + if err := json.Unmarshal(options, opts); err != nil { + return nil, fmt.Errorf("failed to unmarshal submit options: %w", err) + } + opts.Fee = gasPrice + } else { + opts = &SubmitOptions{Fee: gasPrice} + } + + height, err := a.client.Submit(ctx, celestiaBlobs, opts) + if err != nil { + if strings.Contains(err.Error(), "timeout") { + return nil, da.ErrTxTimedOut + } + if strings.Contains(err.Error(), "too large") || strings.Contains(err.Error(), "exceeds") { + return nil, da.ErrBlobSizeOverLimit + } + return nil, err + } + + // Create IDs from height and commitments + ids := make([]da.ID, len(celestiaBlobs)) + for i, blob := range celestiaBlobs { + ids[i] = makeID(height, blob.Commitment) + } + + return ids, nil +} + +// Get retrieves blobs by their IDs. +func (a *Adapter) Get(ctx context.Context, ids []da.ID, namespace []byte) ([]da.Blob, error) { + if len(ids) == 0 { + return []da.Blob{}, nil + } + + // Group IDs by height for efficient retrieval + type blobKey struct { + height uint64 + commitment string + } + heightGroups := make(map[uint64][]Commitment) + idToIndex := make(map[blobKey]int) + + for i, id := range ids { + height, commitment, err := splitID(id) + if err != nil { + return nil, fmt.Errorf("invalid ID at index %d: %w", i, err) + } + heightGroups[height] = append(heightGroups[height], commitment) + idToIndex[blobKey{height, string(commitment)}] = i + } + + // Retrieve blobs for each height + result := make([]da.Blob, len(ids)) + for height := range heightGroups { + blobs, err := a.client.GetAll(ctx, height, []Namespace{namespace}) + if err != nil { + if strings.Contains(err.Error(), "not found") { + return nil, da.ErrBlobNotFound + } + return nil, fmt.Errorf("failed to get blobs at height %d: %w", height, err) + } + + // Match blobs to their original positions + for _, blob := range blobs { + key := blobKey{height, string(blob.Commitment)} + if idx, ok := idToIndex[key]; ok { + result[idx] = blob.Data + } + } + } + + return result, nil +} + +// GetIDs returns all blob IDs at the given height. +func (a *Adapter) GetIDs(ctx context.Context, height uint64, namespace []byte) (*da.GetIDsResult, error) { + blobs, err := a.client.GetAll(ctx, height, []Namespace{namespace}) + if err != nil { + if strings.Contains(err.Error(), "not found") { + return nil, da.ErrBlobNotFound + } + if strings.Contains(err.Error(), "height") && strings.Contains(err.Error(), "future") { + return nil, da.ErrHeightFromFuture + } + return nil, err + } + + if len(blobs) == 0 { + return nil, da.ErrBlobNotFound + } + + ids := make([]da.ID, len(blobs)) + for i, blob := range blobs { + ids[i] = makeID(height, blob.Commitment) + } + + return &da.GetIDsResult{ + IDs: ids, + Timestamp: time.Now(), + }, nil +} + +// GetProofs retrieves inclusion proofs for the given IDs. +func (a *Adapter) GetProofs(ctx context.Context, ids []da.ID, namespace []byte) ([]da.Proof, error) { + if len(ids) == 0 { + return []da.Proof{}, nil + } + + proofs := make([]da.Proof, len(ids)) + for i, id := range ids { + height, commitment, err := splitID(id) + if err != nil { + return nil, fmt.Errorf("invalid ID at index %d: %w", i, err) + } + + proof, err := a.client.GetProof(ctx, height, namespace, commitment) + if err != nil { + return nil, fmt.Errorf("failed to get proof for ID %d: %w", i, err) + } + + proofs[i] = proof.Data + } + + return proofs, nil +} + +// Commit creates commitments for the given blobs. +// Note: Celestia generates commitments automatically during submission, +// so this is a no-op that returns nil commitments. +func (a *Adapter) Commit(ctx context.Context, blobs []da.Blob, namespace []byte) ([]da.Commitment, error) { + commitments := make([]da.Commitment, len(blobs)) + for i := range blobs { + commitments[i] = nil + } + return commitments, nil +} + +// Validate validates commitments against proofs. +func (a *Adapter) Validate(ctx context.Context, ids []da.ID, proofs []da.Proof, namespace []byte) ([]bool, error) { + if len(ids) != len(proofs) { + return nil, fmt.Errorf("mismatched lengths: %d IDs vs %d proofs", len(ids), len(proofs)) + } + + results := make([]bool, len(ids)) + for i, id := range ids { + height, commitment, err := splitID(id) + if err != nil { + return nil, fmt.Errorf("invalid ID at index %d: %w", i, err) + } + + proof := &Proof{Data: proofs[i]} + included, err := a.client.Included(ctx, height, namespace, proof, commitment) + if err != nil { + return nil, fmt.Errorf("failed to validate proof %d: %w", i, err) + } + + results[i] = included + } + + return results, nil +} + +// makeID creates an ID from a height and a commitment. +func makeID(height uint64, commitment []byte) []byte { + id := make([]byte, len(commitment)+8) + binary.LittleEndian.PutUint64(id, height) + copy(id[8:], commitment) + return id +} + +// splitID splits an ID into a height and a commitment. +func splitID(id []byte) (uint64, []byte, error) { + if len(id) <= 8 { + return 0, nil, fmt.Errorf("invalid ID length: %d", len(id)) + } + commitment := id[8:] + return binary.LittleEndian.Uint64(id[:8]), commitment, nil +} diff --git a/da/celestia/adapter_test.go b/da/celestia/adapter_test.go new file mode 100644 index 0000000000..795d4a2179 --- /dev/null +++ b/da/celestia/adapter_test.go @@ -0,0 +1,142 @@ +package celestia + +import ( + "context" + "testing" + + "github.com/evstack/ev-node/core/da" + "github.com/rs/zerolog" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" +) + +func TestNewAdapter(t *testing.T) { + logger := zerolog.Nop() + ctx := context.Background() + + tests := []struct { + name string + addr string + token string + maxBlobSize uint64 + wantErr bool + }{ + { + name: "valid parameters", + addr: "http://localhost:26658", + token: "test-token", + maxBlobSize: 1024 * 1024, + wantErr: false, + }, + { + name: "empty address", + addr: "", + token: "test-token", + maxBlobSize: 1024, + wantErr: true, + }, + { + name: "zero maxBlobSize", + addr: "http://localhost:26658", + token: "test-token", + maxBlobSize: 0, + wantErr: true, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + adapter, err := NewAdapter(ctx, logger, tt.addr, tt.token, tt.maxBlobSize) + + if tt.wantErr { + require.Error(t, err) + assert.Nil(t, adapter) + } else { + require.NoError(t, err) + require.NotNil(t, adapter) + adapter.Close() + } + }) + } +} + +func TestAdapter_Submit(t *testing.T) { + logger := zerolog.Nop() + ctx := context.Background() + + adapter, err := NewAdapter(ctx, logger, "http://localhost:26658", "token", 1024*1024) + require.NoError(t, err) + defer adapter.Close() + + validNamespace := make([]byte, 29) + blobs := []da.Blob{[]byte("test data")} + + _, err = adapter.Submit(ctx, blobs, 0.002, validNamespace) + require.Error(t, err) + assert.Contains(t, err.Error(), "failed to submit blobs") +} + +func TestAdapter_SubmitWithInvalidNamespace(t *testing.T) { + logger := zerolog.Nop() + ctx := context.Background() + + adapter, err := NewAdapter(ctx, logger, "http://localhost:26658", "token", 1024*1024) + require.NoError(t, err) + defer adapter.Close() + + invalidNamespace := make([]byte, 10) + blobs := []da.Blob{[]byte("test data")} + + _, err = adapter.Submit(ctx, blobs, 0.002, invalidNamespace) + require.Error(t, err) + assert.Contains(t, err.Error(), "invalid namespace") +} + +func TestAdapter_Get(t *testing.T) { + logger := zerolog.Nop() + ctx := context.Background() + + adapter, err := NewAdapter(ctx, logger, "http://localhost:26658", "token", 1024*1024) + require.NoError(t, err) + defer adapter.Close() + + validNamespace := make([]byte, 29) + testID := makeID(100, []byte("test-commitment")) + + _, err = adapter.Get(ctx, []da.ID{testID}, validNamespace) + require.Error(t, err) +} + +func TestAdapter_GetIDs(t *testing.T) { + logger := zerolog.Nop() + ctx := context.Background() + + adapter, err := NewAdapter(ctx, logger, "http://localhost:26658", "token", 1024*1024) + require.NoError(t, err) + defer adapter.Close() + + validNamespace := make([]byte, 29) + + _, err = adapter.GetIDs(ctx, 100, validNamespace) + require.Error(t, err) +} + +func TestMakeIDAndSplitID(t *testing.T) { + height := uint64(12345) + commitment := []byte("test-commitment-data") + + id := makeID(height, commitment) + + retrievedHeight, retrievedCommitment, err := splitID(id) + require.NoError(t, err) + assert.Equal(t, height, retrievedHeight) + assert.Equal(t, commitment, retrievedCommitment) +} + +func TestSplitID_InvalidID(t *testing.T) { + shortID := []byte("short") + + _, _, err := splitID(shortID) + require.Error(t, err) + assert.Contains(t, err.Error(), "invalid ID length") +} From 657b9e8a1edbed8c8761ee684621748d0b14cda3 Mon Sep 17 00:00:00 2001 From: Randy Grok Date: Tue, 18 Nov 2025 00:21:57 +0100 Subject: [PATCH 05/35] refactor(testapp): use Celestia blob API adapter Replace jsonrpc.NewClient with the new Celestia blob API adapter in testapp initialization. This uses the native Celestia blob API instead of the deprecated DA API. --- apps/testapp/cmd/run.go | 9 +++++---- 1 file changed, 5 insertions(+), 4 deletions(-) diff --git a/apps/testapp/cmd/run.go b/apps/testapp/cmd/run.go index c72d220cdd..182ed381a2 100644 --- a/apps/testapp/cmd/run.go +++ b/apps/testapp/cmd/run.go @@ -9,7 +9,7 @@ import ( kvexecutor "github.com/evstack/ev-node/apps/testapp/kv" "github.com/evstack/ev-node/core/da" - "github.com/evstack/ev-node/da/jsonrpc" + celestiada "github.com/evstack/ev-node/da/celestia" "github.com/evstack/ev-node/node" rollcmd "github.com/evstack/ev-node/pkg/cmd" genesispkg "github.com/evstack/ev-node/pkg/genesis" @@ -51,10 +51,11 @@ var RunCmd = &cobra.Command{ logger.Info().Str("headerNamespace", headerNamespace.HexString()).Str("dataNamespace", dataNamespace.HexString()).Msg("namespaces") - daJrpc, err := jsonrpc.NewClient(ctx, logger, nodeConfig.DA.Address, nodeConfig.DA.AuthToken, rollcmd.DefaultMaxBlobSize) + daAdapter, err := celestiada.NewAdapter(ctx, logger, nodeConfig.DA.Address, nodeConfig.DA.AuthToken, rollcmd.DefaultMaxBlobSize) if err != nil { return err } + defer daAdapter.Close() nodeKey, err := key.LoadNodeKey(filepath.Dir(nodeConfig.ConfigPath())) if err != nil { @@ -96,7 +97,7 @@ var RunCmd = &cobra.Command{ ctx, logger, datastore, - &daJrpc.DA, + daAdapter, []byte(genesis.ChainID), nodeConfig.Node.BlockTime.Duration, singleMetrics, @@ -111,6 +112,6 @@ var RunCmd = &cobra.Command{ return err } - return rollcmd.StartNode(logger, cmd, executor, sequencer, &daJrpc.DA, p2pClient, datastore, nodeConfig, genesis, node.NodeOptions{}) + return rollcmd.StartNode(logger, cmd, executor, sequencer, daAdapter, p2pClient, datastore, nodeConfig, genesis, node.NodeOptions{}) }, } From dbfa3b26787a8eea2c45507c95c0caf3979fbfb8 Mon Sep 17 00:00:00 2001 From: Randy Grok Date: Tue, 18 Nov 2025 00:36:03 +0100 Subject: [PATCH 06/35] refactor(apps): use Celestia blob API adapter in evm and grpc Replace jsonrpc.NewClient with the new Celestia blob API adapter in evm and grpc app initialization. All apps now use the native Celestia blob API instead of the deprecated DA API. --- apps/evm/single/cmd/run.go | 12 +++++++----- apps/grpc/single/cmd/run.go | 9 +++++---- 2 files changed, 12 insertions(+), 9 deletions(-) diff --git a/apps/evm/single/cmd/run.go b/apps/evm/single/cmd/run.go index 500107af66..77a0d34533 100644 --- a/apps/evm/single/cmd/run.go +++ b/apps/evm/single/cmd/run.go @@ -8,7 +8,7 @@ import ( "path/filepath" "github.com/evstack/ev-node/core/da" - "github.com/evstack/ev-node/da/jsonrpc" + celestiada "github.com/evstack/ev-node/da/celestia" "github.com/evstack/ev-node/node" "github.com/evstack/ev-node/sequencers/single" @@ -53,10 +53,12 @@ var RunCmd = &cobra.Command{ logger.Info().Str("headerNamespace", headerNamespace.HexString()).Str("dataNamespace", dataNamespace.HexString()).Msg("namespaces") - daJrpc, err := jsonrpc.NewClient(context.Background(), logger, nodeConfig.DA.Address, nodeConfig.DA.AuthToken, rollcmd.DefaultMaxBlobSize) + ctx := context.Background() + daAdapter, err := celestiada.NewAdapter(ctx, logger, nodeConfig.DA.Address, nodeConfig.DA.AuthToken, rollcmd.DefaultMaxBlobSize) if err != nil { return err } + defer daAdapter.Close() datastore, err := store.NewDefaultKVStore(nodeConfig.RootDir, nodeConfig.DBPath, "evm-single") if err != nil { @@ -79,10 +81,10 @@ var RunCmd = &cobra.Command{ } sequencer, err := single.NewSequencer( - context.Background(), + ctx, logger, datastore, - &daJrpc.DA, + daAdapter, []byte(genesis.ChainID), nodeConfig.Node.BlockTime.Duration, singleMetrics, @@ -102,7 +104,7 @@ var RunCmd = &cobra.Command{ return err } - return rollcmd.StartNode(logger, cmd, executor, sequencer, &daJrpc.DA, p2pClient, datastore, nodeConfig, genesis, node.NodeOptions{}) + return rollcmd.StartNode(logger, cmd, executor, sequencer, daAdapter, p2pClient, datastore, nodeConfig, genesis, node.NodeOptions{}) }, } diff --git a/apps/grpc/single/cmd/run.go b/apps/grpc/single/cmd/run.go index cef7e092e0..5eb9d8c1e8 100644 --- a/apps/grpc/single/cmd/run.go +++ b/apps/grpc/single/cmd/run.go @@ -8,7 +8,7 @@ import ( "github.com/evstack/ev-node/core/da" "github.com/evstack/ev-node/core/execution" - "github.com/evstack/ev-node/da/jsonrpc" + celestiada "github.com/evstack/ev-node/da/celestia" executiongrpc "github.com/evstack/ev-node/execution/grpc" "github.com/evstack/ev-node/node" rollcmd "github.com/evstack/ev-node/pkg/cmd" @@ -52,10 +52,11 @@ The execution client must implement the Evolve execution gRPC interface.`, logger.Info().Str("headerNamespace", headerNamespace.HexString()).Str("dataNamespace", dataNamespace.HexString()).Msg("namespaces") // Create DA client - daJrpc, err := jsonrpc.NewClient(cmd.Context(), logger, nodeConfig.DA.Address, nodeConfig.DA.AuthToken, rollcmd.DefaultMaxBlobSize) + daAdapter, err := celestiada.NewAdapter(cmd.Context(), logger, nodeConfig.DA.Address, nodeConfig.DA.AuthToken, rollcmd.DefaultMaxBlobSize) if err != nil { return err } + defer daAdapter.Close() // Create datastore datastore, err := store.NewDefaultKVStore(nodeConfig.RootDir, nodeConfig.DBPath, "grpc-single") @@ -84,7 +85,7 @@ The execution client must implement the Evolve execution gRPC interface.`, cmd.Context(), logger, datastore, - &daJrpc.DA, + daAdapter, []byte(genesis.ChainID), nodeConfig.Node.BlockTime.Duration, singleMetrics, @@ -107,7 +108,7 @@ The execution client must implement the Evolve execution gRPC interface.`, } // Start the node - return rollcmd.StartNode(logger, cmd, executor, sequencer, &daJrpc.DA, p2pClient, datastore, nodeConfig, genesis, node.NodeOptions{}) + return rollcmd.StartNode(logger, cmd, executor, sequencer, daAdapter, p2pClient, datastore, nodeConfig, genesis, node.NodeOptions{}) }, } From 1616a6f53332654e980ca4623f67e804258765c4 Mon Sep 17 00:00:00 2001 From: Randy Grok Date: Mon, 24 Nov 2025 12:19:29 +0100 Subject: [PATCH 07/35] refactor: remove generic JSON-RPC DA wrapper and da-debug tool Remove the generic JSON-RPC DA abstraction layer now that all applications use the Celestia blob API adapter directly. Changes: - Remove da/jsonrpc package (client, server, tests) - Remove tools/da-debug debugging tool - Embed JSON-RPC server directly in local-da for testing - Update documentation to reflect Celestia as the DA implementation - Remove da-debug from tools.mk build configuration --- da/celestia/client.go | 8 +- da/celestia/client_test.go | 4 +- da/cmd/local-da/main.go | 4 +- da/{jsonrpc => cmd/local-da}/server.go | 25 +- da/jsonrpc/client.go | 241 ----------- da/jsonrpc/client_test.go | 125 ------ da/jsonrpc/errors.go | 21 - da/jsonrpc/proxy_test.go | 351 ---------------- docs/learn/specs/da.md | 15 +- tools/da-debug/README.md | 96 ----- tools/da-debug/go.mod | 58 --- tools/da-debug/go.sum | 418 ------------------- tools/da-debug/main.go | 557 ------------------------- tools/da-debug/main_test.go | 216 ---------- tools/tools.mk | 17 +- 15 files changed, 31 insertions(+), 2125 deletions(-) rename da/{jsonrpc => cmd/local-da}/server.go (81%) delete mode 100644 da/jsonrpc/client.go delete mode 100644 da/jsonrpc/client_test.go delete mode 100644 da/jsonrpc/errors.go delete mode 100644 da/jsonrpc/proxy_test.go delete mode 100644 tools/da-debug/README.md delete mode 100644 tools/da-debug/go.mod delete mode 100644 tools/da-debug/go.sum delete mode 100644 tools/da-debug/main.go delete mode 100644 tools/da-debug/main_test.go diff --git a/da/celestia/client.go b/da/celestia/client.go index 3a1923f3d5..96da15a71d 100644 --- a/da/celestia/client.go +++ b/da/celestia/client.go @@ -16,10 +16,10 @@ type Client struct { closer jsonrpc.ClientCloser Internal struct { - Submit func(ctx context.Context, blobs []*Blob, opts *SubmitOptions) (uint64, error) `perm:"write"` - Get func(ctx context.Context, height uint64, ns Namespace, c Commitment) (*Blob, error) `perm:"read"` - GetAll func(ctx context.Context, height uint64, namespaces []Namespace) ([]*Blob, error) `perm:"read"` - GetProof func(ctx context.Context, height uint64, ns Namespace, c Commitment) (*Proof, error) `perm:"read"` + Submit func(ctx context.Context, blobs []*Blob, opts *SubmitOptions) (uint64, error) `perm:"write"` + Get func(ctx context.Context, height uint64, ns Namespace, c Commitment) (*Blob, error) `perm:"read"` + GetAll func(ctx context.Context, height uint64, namespaces []Namespace) ([]*Blob, error) `perm:"read"` + GetProof func(ctx context.Context, height uint64, ns Namespace, c Commitment) (*Proof, error) `perm:"read"` Included func(ctx context.Context, height uint64, ns Namespace, proof *Proof, c Commitment) (bool, error) `perm:"read"` } } diff --git a/da/celestia/client_test.go b/da/celestia/client_test.go index ee56c528e4..60954ff39a 100644 --- a/da/celestia/client_test.go +++ b/da/celestia/client_test.go @@ -114,8 +114,8 @@ func TestClient_Submit(t *testing.T) { } tests := []struct { - name string - blobs []*Blob + name string + blobs []*Blob wantRPC bool }{ { diff --git a/da/cmd/local-da/main.go b/da/cmd/local-da/main.go index 5823b77156..b861d35a40 100644 --- a/da/cmd/local-da/main.go +++ b/da/cmd/local-da/main.go @@ -9,8 +9,6 @@ import ( "syscall" "github.com/rs/zerolog" - - proxy "github.com/evstack/ev-node/da/jsonrpc" ) const ( @@ -46,7 +44,7 @@ func main() { } da := NewLocalDA(logger, opts...) - srv := proxy.NewServer(logger, host, port, da) + srv := NewServer(logger, host, port, da) logger.Info().Str("host", host).Str("port", port).Uint64("maxBlobSize", maxBlobSize).Msg("Listening on") if err := srv.Start(context.Background()); err != nil { logger.Error().Err(err).Msg("error while serving") diff --git a/da/jsonrpc/server.go b/da/cmd/local-da/server.go similarity index 81% rename from da/jsonrpc/server.go rename to da/cmd/local-da/server.go index 456eefe908..9066f01e72 100644 --- a/da/jsonrpc/server.go +++ b/da/cmd/local-da/server.go @@ -1,4 +1,4 @@ -package jsonrpc +package main import ( "context" @@ -13,7 +13,7 @@ import ( "github.com/evstack/ev-node/core/da" ) -// Server is a jsonrpc service that can serve the DA interface +// Server is a jsonrpc service that serves the LocalDA implementation type Server struct { logger zerolog.Logger srv *http.Server @@ -60,7 +60,7 @@ func (s *serverInternalAPI) Validate(ctx context.Context, ids []da.ID, proofs [] return s.daImpl.Validate(ctx, ids, proofs, ns) } -// Submit implements the RPC method. This is the primary submit method which includes options. +// Submit implements the RPC method. func (s *serverInternalAPI) Submit(ctx context.Context, blobs []da.Blob, gasPrice float64, ns []byte) ([]da.ID, error) { s.logger.Debug().Int("num_blobs", len(blobs)).Float64("gas_price", gasPrice).Str("namespace", string(ns)).Msg("RPC server: Submit called") return s.daImpl.Submit(ctx, blobs, gasPrice, ns) @@ -72,7 +72,20 @@ func (s *serverInternalAPI) SubmitWithOptions(ctx context.Context, blobs []da.Bl return s.daImpl.SubmitWithOptions(ctx, blobs, gasPrice, ns, options) } -// NewServer accepts the host address port and the DA implementation to serve as a jsonrpc service +func getKnownErrorsMapping() jsonrpc.Errors { + errs := jsonrpc.NewErrors() + errs.Register(jsonrpc.ErrorCode(da.StatusNotFound), &da.ErrBlobNotFound) + errs.Register(jsonrpc.ErrorCode(da.StatusTooBig), &da.ErrBlobSizeOverLimit) + errs.Register(jsonrpc.ErrorCode(da.StatusContextDeadline), &da.ErrTxTimedOut) + errs.Register(jsonrpc.ErrorCode(da.StatusAlreadyInMempool), &da.ErrTxAlreadyInMempool) + errs.Register(jsonrpc.ErrorCode(da.StatusIncorrectAccountSequence), &da.ErrTxIncorrectAccountSequence) + errs.Register(jsonrpc.ErrorCode(da.StatusContextDeadline), &da.ErrContextDeadline) + errs.Register(jsonrpc.ErrorCode(da.StatusContextCanceled), &da.ErrContextCanceled) + errs.Register(jsonrpc.ErrorCode(da.StatusHeightFromFuture), &da.ErrHeightFromFuture) + return errs +} + +// NewServer creates a new JSON-RPC server for the LocalDA implementation func NewServer(logger zerolog.Logger, address, port string, daImplementation da.DA) *Server { rpc := jsonrpc.NewServer(jsonrpc.WithServerErrors(getKnownErrorsMapping())) srv := &Server{ @@ -96,8 +109,6 @@ func NewServer(logger zerolog.Logger, address, port string, daImplementation da. } // Start starts the RPC Server. -// This function can be called multiple times concurrently -// Once started, subsequent calls are a no-op func (s *Server) Start(context.Context) error { couldStart := s.started.CompareAndSwap(false, true) @@ -117,8 +128,6 @@ func (s *Server) Start(context.Context) error { } // Stop stops the RPC Server. -// This function can be called multiple times concurrently -// Once stopped, subsequent calls are a no-op func (s *Server) Stop(ctx context.Context) error { couldStop := s.started.CompareAndSwap(true, false) if !couldStop { diff --git a/da/jsonrpc/client.go b/da/jsonrpc/client.go deleted file mode 100644 index 9803ebcd49..0000000000 --- a/da/jsonrpc/client.go +++ /dev/null @@ -1,241 +0,0 @@ -package jsonrpc - -import ( - "context" - "encoding/hex" - "fmt" - "net/http" - "strings" - - "github.com/filecoin-project/go-jsonrpc" - "github.com/rs/zerolog" - - "github.com/evstack/ev-node/core/da" -) - -//go:generate mockgen -destination=mocks/api.go -package=mocks . Module -type Module interface { - da.DA -} - -// API defines the jsonrpc service module API -type API struct { - Logger zerolog.Logger - MaxBlobSize uint64 - Internal struct { - Get func(ctx context.Context, ids []da.ID, ns []byte) ([]da.Blob, error) `perm:"read"` - GetIDs func(ctx context.Context, height uint64, ns []byte) (*da.GetIDsResult, error) `perm:"read"` - GetProofs func(ctx context.Context, ids []da.ID, ns []byte) ([]da.Proof, error) `perm:"read"` - Commit func(ctx context.Context, blobs []da.Blob, ns []byte) ([]da.Commitment, error) `perm:"read"` - Validate func(context.Context, []da.ID, []da.Proof, []byte) ([]bool, error) `perm:"read"` - Submit func(context.Context, []da.Blob, float64, []byte) ([]da.ID, error) `perm:"write"` - SubmitWithOptions func(context.Context, []da.Blob, float64, []byte, []byte) ([]da.ID, error) `perm:"write"` - } -} - -// Get returns Blob for each given ID, or an error. -func (api *API) Get(ctx context.Context, ids []da.ID, ns []byte) ([]da.Blob, error) { - api.Logger.Debug().Str("method", "Get").Int("num_ids", len(ids)).Str("namespace", hex.EncodeToString(ns)).Msg("Making RPC call") - res, err := api.Internal.Get(ctx, ids, ns) - if err != nil { - if strings.Contains(err.Error(), context.Canceled.Error()) { - api.Logger.Debug().Str("method", "Get").Msg("RPC call canceled due to context cancellation") - return res, context.Canceled - } - api.Logger.Error().Err(err).Str("method", "Get").Msg("RPC call failed") - // Wrap error for context, potentially using the translated error from the RPC library - return nil, fmt.Errorf("failed to get blobs: %w", err) - } - api.Logger.Debug().Str("method", "Get").Int("num_blobs_returned", len(res)).Msg("RPC call successful") - return res, nil -} - -// GetIDs returns IDs of all Blobs located in DA at given height. -func (api *API) GetIDs(ctx context.Context, height uint64, ns []byte) (*da.GetIDsResult, error) { - api.Logger.Debug().Str("method", "GetIDs").Uint64("height", height).Str("namespace", hex.EncodeToString(ns)).Msg("Making RPC call") - res, err := api.Internal.GetIDs(ctx, height, ns) - if err != nil { - // Using strings.contains since JSON RPC serialization doesn't preserve error wrapping - // Check if the error is specifically BlobNotFound, otherwise log and return - if strings.Contains(err.Error(), da.ErrBlobNotFound.Error()) { // Use the error variable directly - api.Logger.Debug().Str("method", "GetIDs").Uint64("height", height).Msg("RPC call indicates blobs not found") - return nil, err // Return the specific ErrBlobNotFound - } - if strings.Contains(err.Error(), da.ErrHeightFromFuture.Error()) { - api.Logger.Debug().Str("method", "GetIDs").Uint64("height", height).Msg("RPC call indicates height from future") - return nil, err // Return the specific ErrHeightFromFuture - } - if strings.Contains(err.Error(), context.Canceled.Error()) { - api.Logger.Debug().Str("method", "GetIDs").Msg("RPC call canceled due to context cancellation") - return res, context.Canceled - } - api.Logger.Error().Err(err).Str("method", "GetIDs").Msg("RPC call failed") - return nil, err - } - - // Handle cases where the RPC call succeeds but returns no IDs - if res == nil || len(res.IDs) == 0 { - api.Logger.Debug().Str("method", "GetIDs").Uint64("height", height).Msg("RPC call successful but no IDs found") - return nil, da.ErrBlobNotFound // Return specific error for not found (use variable directly) - } - - api.Logger.Debug().Str("method", "GetIDs").Msg("RPC call successful") - return res, nil -} - -// GetProofs returns inclusion Proofs for Blobs specified by their IDs. -func (api *API) GetProofs(ctx context.Context, ids []da.ID, ns []byte) ([]da.Proof, error) { - api.Logger.Debug().Str("method", "GetProofs").Int("num_ids", len(ids)).Str("namespace", hex.EncodeToString(ns)).Msg("Making RPC call") - res, err := api.Internal.GetProofs(ctx, ids, ns) - if err != nil { - api.Logger.Error().Err(err).Str("method", "GetProofs").Msg("RPC call failed") - } else { - api.Logger.Debug().Str("method", "GetProofs").Int("num_proofs_returned", len(res)).Msg("RPC call successful") - } - return res, err -} - -// Commit creates a Commitment for each given Blob. -func (api *API) Commit(ctx context.Context, blobs []da.Blob, ns []byte) ([]da.Commitment, error) { - api.Logger.Debug().Str("method", "Commit").Int("num_blobs", len(blobs)).Str("namespace", hex.EncodeToString(ns)).Msg("Making RPC call") - res, err := api.Internal.Commit(ctx, blobs, ns) - if err != nil { - api.Logger.Error().Err(err).Str("method", "Commit").Msg("RPC call failed") - } else { - api.Logger.Debug().Str("method", "Commit").Int("num_commitments_returned", len(res)).Msg("RPC call successful") - } - return res, err -} - -// Validate validates Commitments against the corresponding Proofs. This should be possible without retrieving the Blobs. -func (api *API) Validate(ctx context.Context, ids []da.ID, proofs []da.Proof, ns []byte) ([]bool, error) { - api.Logger.Debug().Str("method", "Validate").Int("num_ids", len(ids)).Int("num_proofs", len(proofs)).Str("namespace", hex.EncodeToString(ns)).Msg("Making RPC call") - res, err := api.Internal.Validate(ctx, ids, proofs, ns) - if err != nil { - api.Logger.Error().Err(err).Str("method", "Validate").Msg("RPC call failed") - } else { - api.Logger.Debug().Str("method", "Validate").Int("num_results_returned", len(res)).Msg("RPC call successful") - } - return res, err -} - -// Submit submits the Blobs to Data Availability layer. -func (api *API) Submit(ctx context.Context, blobs []da.Blob, gasPrice float64, ns []byte) ([]da.ID, error) { - api.Logger.Debug().Str("method", "Submit").Int("num_blobs", len(blobs)).Float64("gas_price", gasPrice).Str("namespace", hex.EncodeToString(ns)).Msg("Making RPC call") - res, err := api.Internal.Submit(ctx, blobs, gasPrice, ns) - if err != nil { - if strings.Contains(err.Error(), context.Canceled.Error()) { - api.Logger.Debug().Str("method", "Submit").Msg("RPC call canceled due to context cancellation") - return res, context.Canceled - } - api.Logger.Error().Err(err).Str("method", "Submit").Bytes("namespace", ns).Msg("RPC call failed") - } else { - api.Logger.Debug().Str("method", "Submit").Int("num_ids_returned", len(res)).Msg("RPC call successful") - } - return res, err -} - -// SubmitWithOptions submits the Blobs to Data Availability layer with additional options. -// It validates the entire batch against MaxBlobSize before submission. -// If any blob or the total batch size exceeds limits, it returns ErrBlobSizeOverLimit. -func (api *API) SubmitWithOptions(ctx context.Context, inputBlobs []da.Blob, gasPrice float64, ns []byte, options []byte) ([]da.ID, error) { - maxBlobSize := api.MaxBlobSize - - if len(inputBlobs) == 0 { - return []da.ID{}, nil - } - - // Validate each blob individually and calculate total size - var totalSize uint64 - for i, blob := range inputBlobs { - blobLen := uint64(len(blob)) - if blobLen > maxBlobSize { - api.Logger.Warn().Int("index", i).Uint64("blobSize", blobLen).Uint64("maxBlobSize", maxBlobSize).Msg("Individual blob exceeds MaxBlobSize") - return nil, da.ErrBlobSizeOverLimit - } - totalSize += blobLen - } - - // Validate total batch size - if totalSize > maxBlobSize { - return nil, da.ErrBlobSizeOverLimit - } - - api.Logger.Debug().Str("method", "SubmitWithOptions").Int("num_blobs", len(inputBlobs)).Uint64("total_size", totalSize).Float64("gas_price", gasPrice).Str("namespace", hex.EncodeToString(ns)).Msg("Making RPC call") - res, err := api.Internal.SubmitWithOptions(ctx, inputBlobs, gasPrice, ns, options) - if err != nil { - if strings.Contains(err.Error(), context.Canceled.Error()) { - api.Logger.Debug().Str("method", "SubmitWithOptions").Msg("RPC call canceled due to context cancellation") - return res, context.Canceled - } - api.Logger.Error().Err(err).Str("method", "SubmitWithOptions").Msg("RPC call failed") - } else { - api.Logger.Debug().Str("method", "SubmitWithOptions").Int("num_ids_returned", len(res)).Msg("RPC call successful") - } - - return res, err -} - -// Client is the jsonrpc client -type Client struct { - DA API - closer multiClientCloser -} - -// multiClientCloser is a wrapper struct to close clients across multiple namespaces. -type multiClientCloser struct { - closers []jsonrpc.ClientCloser -} - -// register adds a new closer to the multiClientCloser -func (m *multiClientCloser) register(closer jsonrpc.ClientCloser) { - m.closers = append(m.closers, closer) -} - -// closeAll closes all saved clients. -func (m *multiClientCloser) closeAll() { - for _, closer := range m.closers { - closer() - } -} - -// Close closes the connections to all namespaces registered on the staticClient. -func (c *Client) Close() { - c.closer.closeAll() -} - -// NewClient creates a new Client with one connection per namespace with the -// given token as the authorization token. -func NewClient(ctx context.Context, logger zerolog.Logger, addr, token string, maxBlobSize uint64) (*Client, error) { - authHeader := http.Header{"Authorization": []string{fmt.Sprintf("Bearer %s", token)}} - return newClient(ctx, logger, addr, authHeader, maxBlobSize) -} - -func newClient(ctx context.Context, logger zerolog.Logger, addr string, authHeader http.Header, maxBlobSize uint64) (*Client, error) { - var multiCloser multiClientCloser - var client Client - client.DA.Logger = logger - client.DA.MaxBlobSize = maxBlobSize - - errs := getKnownErrorsMapping() - for name, module := range moduleMap(&client) { - closer, err := jsonrpc.NewMergeClient(ctx, addr, name, []interface{}{module}, authHeader, jsonrpc.WithErrors(errs)) - if err != nil { - // If an error occurs, close any previously opened connections - multiCloser.closeAll() - return nil, err - } - multiCloser.register(closer) - } - - client.closer = multiCloser // Assign the multiCloser to the client - - return &client, nil -} - -func moduleMap(client *Client) map[string]interface{} { - // TODO: this duplication of strings many times across the codebase can be avoided with issue #1176 - return map[string]interface{}{ - "da": &client.DA.Internal, - } -} diff --git a/da/jsonrpc/client_test.go b/da/jsonrpc/client_test.go deleted file mode 100644 index af32882ea9..0000000000 --- a/da/jsonrpc/client_test.go +++ /dev/null @@ -1,125 +0,0 @@ -package jsonrpc - -import ( - "context" - "testing" - - "github.com/rs/zerolog" - "github.com/stretchr/testify/assert" - - "github.com/evstack/ev-node/core/da" -) - -// TestSubmitWithOptions_SizeValidation tests the corrected behavior of SubmitWithOptions -// where it validates the entire batch before submission and returns ErrBlobSizeOverLimit -// if the batch is too large, instead of silently dropping blobs. -func TestSubmitWithOptions_SizeValidation(t *testing.T) { - logger := zerolog.Nop() - - testCases := []struct { - name string - maxBlobSize uint64 - inputBlobs []da.Blob - expectError bool - expectedError error - description string - }{ - { - name: "Empty input", - maxBlobSize: 1000, - inputBlobs: []da.Blob{}, - expectError: false, - description: "Empty input should return empty result without error", - }, - { - name: "Single blob within limit", - maxBlobSize: 1000, - inputBlobs: []da.Blob{make([]byte, 500)}, - expectError: false, - description: "Single blob smaller than limit should succeed", - }, - { - name: "Single blob exceeds limit", - maxBlobSize: 1000, - inputBlobs: []da.Blob{make([]byte, 1500)}, - expectError: true, - expectedError: da.ErrBlobSizeOverLimit, - description: "Single blob larger than limit should fail", - }, - { - name: "Multiple blobs within limit", - maxBlobSize: 1000, - inputBlobs: []da.Blob{make([]byte, 300), make([]byte, 400), make([]byte, 200)}, - expectError: false, - description: "Multiple blobs totaling less than limit should succeed", - }, - { - name: "Multiple blobs exceed total limit", - maxBlobSize: 1000, - inputBlobs: []da.Blob{make([]byte, 400), make([]byte, 400), make([]byte, 400)}, - expectError: true, - expectedError: da.ErrBlobSizeOverLimit, - description: "Multiple blobs totaling more than limit should fail completely", - }, - { - name: "Mixed: some blobs fit, total exceeds limit", - maxBlobSize: 1000, - inputBlobs: []da.Blob{make([]byte, 100), make([]byte, 200), make([]byte, 800)}, - expectError: true, - expectedError: da.ErrBlobSizeOverLimit, - description: "Should fail completely, not partially submit blobs that fit", - }, - { - name: "One blob exceeds limit individually", - maxBlobSize: 1000, - inputBlobs: []da.Blob{make([]byte, 300), make([]byte, 1500), make([]byte, 200)}, - expectError: true, - expectedError: da.ErrBlobSizeOverLimit, - description: "Should fail if any individual blob exceeds limit", - }, - } - - for _, tc := range testCases { - t.Run(tc.name, func(t *testing.T) { - // Create API with test configuration - api := &API{ - Logger: logger, - MaxBlobSize: tc.maxBlobSize, - } - - // Mock the Internal.SubmitWithOptions to always succeed if called - // This tests that our validation logic works before reaching the actual RPC call - mockCalled := false - api.Internal.SubmitWithOptions = func(ctx context.Context, blobs []da.Blob, gasPrice float64, namespace []byte, options []byte) ([]da.ID, error) { - mockCalled = true - // Return mock IDs for successful submissions - ids := make([]da.ID, len(blobs)) - for i := range blobs { - ids[i] = []byte{byte(i)} - } - return ids, nil - } - - // Call SubmitWithOptions - ctx := context.Background() - result, err := api.SubmitWithOptions(ctx, tc.inputBlobs, 1.0, []byte("test"), nil) - - // Verify expectations - if tc.expectError { - assert.Error(t, err, tc.description) - if tc.expectedError != nil { - assert.ErrorIs(t, err, tc.expectedError, tc.description) - } - assert.Nil(t, result, "Result should be nil on error") - assert.False(t, mockCalled, "Internal RPC should not be called when validation fails") - } else { - assert.NoError(t, err, tc.description) - assert.NotNil(t, result, "Result should not be nil on success") - if len(tc.inputBlobs) > 0 { - assert.True(t, mockCalled, "Internal RPC should be called for valid submissions") - assert.Len(t, result, len(tc.inputBlobs), "Should return IDs for all submitted blobs") - } - } - }) - } -} diff --git a/da/jsonrpc/errors.go b/da/jsonrpc/errors.go deleted file mode 100644 index c81040e899..0000000000 --- a/da/jsonrpc/errors.go +++ /dev/null @@ -1,21 +0,0 @@ -package jsonrpc - -import ( - "github.com/filecoin-project/go-jsonrpc" - - coreda "github.com/evstack/ev-node/core/da" -) - -// getKnownErrorsMapping returns a mapping of known error codes to their corresponding error types. -func getKnownErrorsMapping() jsonrpc.Errors { - errs := jsonrpc.NewErrors() - errs.Register(jsonrpc.ErrorCode(coreda.StatusNotFound), &coreda.ErrBlobNotFound) - errs.Register(jsonrpc.ErrorCode(coreda.StatusTooBig), &coreda.ErrBlobSizeOverLimit) - errs.Register(jsonrpc.ErrorCode(coreda.StatusContextDeadline), &coreda.ErrTxTimedOut) - errs.Register(jsonrpc.ErrorCode(coreda.StatusAlreadyInMempool), &coreda.ErrTxAlreadyInMempool) - errs.Register(jsonrpc.ErrorCode(coreda.StatusIncorrectAccountSequence), &coreda.ErrTxIncorrectAccountSequence) - errs.Register(jsonrpc.ErrorCode(coreda.StatusContextDeadline), &coreda.ErrContextDeadline) - errs.Register(jsonrpc.ErrorCode(coreda.StatusContextCanceled), &coreda.ErrContextCanceled) - errs.Register(jsonrpc.ErrorCode(coreda.StatusHeightFromFuture), &coreda.ErrHeightFromFuture) - return errs -} diff --git a/da/jsonrpc/proxy_test.go b/da/jsonrpc/proxy_test.go deleted file mode 100644 index 5bbce0d2b6..0000000000 --- a/da/jsonrpc/proxy_test.go +++ /dev/null @@ -1,351 +0,0 @@ -package jsonrpc_test - -import ( - "bytes" - "context" - "errors" - "fmt" - "strings" - "sync" - "testing" - "time" - - "github.com/evstack/ev-node/da/internal/mocks" - proxy "github.com/evstack/ev-node/da/jsonrpc" - "github.com/rs/zerolog" - "github.com/stretchr/testify/assert" - "github.com/stretchr/testify/mock" - "github.com/stretchr/testify/require" - - coreda "github.com/evstack/ev-node/core/da" -) - -const ( - // ServerHost is the listen host for the test JSONRPC server - ServerHost = "localhost" - // ServerPort is the listen port for the test JSONRPC server - ServerPort = "3450" - // ClientURL is the url to dial for the test JSONRPC client - ClientURL = "http://localhost:3450" - - testMaxBlobSize = 100 - - DefaultMaxBlobSize = 1.5 * 1024 * 1024 // 1.5MB -) - -// testNamespace is a 15-byte namespace that will be hex encoded to 30 chars and truncated to 29 -var testNamespace = []byte("test-namespace1") - -// TestProxy runs the go-da DA test suite against the JSONRPC service -// NOTE: This test requires a test JSONRPC service to run on the port -// 3450 which is chosen to be sufficiently distinct from the default port - -func getTestDABlockTime() time.Duration { - return 100 * time.Millisecond -} - -func TestProxy(t *testing.T) { - dummy := coreda.NewDummyDA(100_000, getTestDABlockTime()) - dummy.StartHeightTicker() - logger := zerolog.Nop() - server := proxy.NewServer(logger, ServerHost, ServerPort, dummy) - err := server.Start(context.Background()) - require.NoError(t, err) - defer func() { - if err := server.Stop(context.Background()); err != nil { - require.NoError(t, err) - } - }() - - client, err := proxy.NewClient(context.Background(), logger, ClientURL, "74657374", DefaultMaxBlobSize) - require.NoError(t, err) - - t.Run("Basic DA test", func(t *testing.T) { - BasicDATest(t, &client.DA) - }) - t.Run("Get IDs and all data", func(t *testing.T) { - GetIDsTest(t, &client.DA) - }) - t.Run("Check Errors", func(t *testing.T) { - CheckErrors(t, &client.DA) - }) - t.Run("Concurrent read/write test", func(t *testing.T) { - ConcurrentReadWriteTest(t, &client.DA) - }) - t.Run("Given height is from the future", func(t *testing.T) { - HeightFromFutureTest(t, &client.DA) - }) - dummy.StopHeightTicker() -} - -// BasicDATest tests round trip of messages to DA and back. -func BasicDATest(t *testing.T, d coreda.DA) { - msg1 := []byte("message 1") - msg2 := []byte("message 2") - - ctx := t.Context() - id1, err := d.Submit(ctx, []coreda.Blob{msg1}, 0, testNamespace) - assert.NoError(t, err) - assert.NotEmpty(t, id1) - - id2, err := d.Submit(ctx, []coreda.Blob{msg2}, 0, testNamespace) - assert.NoError(t, err) - assert.NotEmpty(t, id2) - - time.Sleep(getTestDABlockTime()) - - id3, err := d.SubmitWithOptions(ctx, []coreda.Blob{msg1}, 0, testNamespace, []byte("random options")) - assert.NoError(t, err) - assert.NotEmpty(t, id3) - - assert.NotEqual(t, id1, id2) - assert.NotEqual(t, id1, id3) - - ret, err := d.Get(ctx, id1, testNamespace) - assert.NoError(t, err) - assert.Equal(t, []coreda.Blob{msg1}, ret) - - commitment1, err := d.Commit(ctx, []coreda.Blob{msg1}, []byte{}) - assert.NoError(t, err) - assert.NotEmpty(t, commitment1) - - commitment2, err := d.Commit(ctx, []coreda.Blob{msg2}, []byte{}) - assert.NoError(t, err) - assert.NotEmpty(t, commitment2) - - ids := []coreda.ID{id1[0], id2[0], id3[0]} - proofs, err := d.GetProofs(ctx, ids, testNamespace) - assert.NoError(t, err) - assert.NotEmpty(t, proofs) - oks, err := d.Validate(ctx, ids, proofs, testNamespace) - assert.NoError(t, err) - assert.NotEmpty(t, oks) - for _, ok := range oks { - assert.True(t, ok) - } -} - -// CheckErrors ensures that errors are handled properly by DA. -func CheckErrors(t *testing.T, d coreda.DA) { - ctx := t.Context() - blob, err := d.Get(ctx, []coreda.ID{[]byte("invalid blob id")}, testNamespace) - assert.Error(t, err) - assert.ErrorContains(t, err, coreda.ErrBlobNotFound.Error()) - assert.Empty(t, blob) -} - -// GetIDsTest tests iteration over DA -func GetIDsTest(t *testing.T, d coreda.DA) { - msgs := []coreda.Blob{[]byte("msg1"), []byte("msg2"), []byte("msg3")} - - ctx := t.Context() - ids, err := d.Submit(ctx, msgs, 0, testNamespace) - time.Sleep(getTestDABlockTime()) - assert.NoError(t, err) - assert.Len(t, ids, len(msgs)) - found := false - end := time.Now().Add(1 * time.Second) - - // To Keep It Simple: we assume working with DA used exclusively for this test (mock, devnet, etc) - // As we're the only user, we don't need to handle external data (that could be submitted in real world). - // There is no notion of height, so we need to scan the DA to get test data back. - for i := uint64(1); !found && !time.Now().After(end); i++ { - ret, err := d.GetIDs(ctx, i, testNamespace) - if err != nil { - if strings.Contains(err.Error(), coreda.ErrHeightFromFuture.Error()) { - break - } - t.Error("failed to get IDs:", err) - } - assert.NotNil(t, ret) - assert.NotZero(t, ret.Timestamp) - if len(ret.IDs) > 0 { - blobs, err := d.Get(ctx, ret.IDs, testNamespace) - assert.NoError(t, err) - - // Submit ensures atomicity of batch, so it makes sense to compare actual blobs (bodies) only when lengths - // of slices is the same. - if len(blobs) >= len(msgs) { - found = true - for _, msg := range msgs { - msgFound := false - for _, blob := range blobs { - if bytes.Equal(blob, msg) { - msgFound = true - break - } - } - if !msgFound { - found = false - break - } - } - } - } - } - - assert.True(t, found) -} - -// ConcurrentReadWriteTest tests the use of mutex lock in DummyDA by calling separate methods that use `d.data` and making sure there's no race conditions -func ConcurrentReadWriteTest(t *testing.T, d coreda.DA) { - var wg sync.WaitGroup - ctx, cancel := context.WithTimeout(t.Context(), 5*time.Second) - defer cancel() - - writeDone := make(chan struct{}) - - wg.Add(1) - go func() { - defer wg.Done() - for i := uint64(1); i <= 50; i++ { - _, err := d.Submit(ctx, []coreda.Blob{[]byte(fmt.Sprintf("test-%d", i))}, 0, []byte("test")) - assert.NoError(t, err) - } - close(writeDone) - }() - - wg.Add(1) - go func() { - defer wg.Done() - for { - select { - case <-writeDone: - return - default: - _, _ = d.GetIDs(ctx, 0, []byte("test")) - } - } - }() - - wg.Wait() -} - -// HeightFromFutureTest tests the case when the given height is from the future -func HeightFromFutureTest(t *testing.T, d coreda.DA) { - ctx := t.Context() - _, err := d.GetIDs(ctx, 999999999, []byte("test")) - assert.Error(t, err) - // Specifically check if the error contains the error message ErrHeightFromFuture - assert.ErrorContains(t, err, coreda.ErrHeightFromFuture.Error()) -} - -// TestSubmitWithOptions tests the SubmitWithOptions method with various scenarios -func TestSubmitWithOptions(t *testing.T) { - ctx := context.Background() - testNamespace := "options_test" - // The client will convert the namespace string to a proper Celestia namespace - // using SHA256 hashing and version 0 format (1 version byte + 28 ID bytes) - namespace := coreda.NamespaceFromString(testNamespace) - encodedNamespace := namespace.Bytes() - testOptions := []byte("test_options") - gasPrice := 0.0 - - // Helper function to create a client with a mocked internal API - createMockedClient := func(internalAPI *mocks.MockDA) *proxy.Client { - client := &proxy.Client{} - client.DA.Internal.SubmitWithOptions = internalAPI.SubmitWithOptions - client.DA.MaxBlobSize = testMaxBlobSize - client.DA.Logger = zerolog.Nop() - // Test verbosity no longer needed with Nop logger - return client - } - - t.Run("Happy Path - All blobs fit", func(t *testing.T) { - mockAPI := mocks.NewMockDA(t) - client := createMockedClient(mockAPI) - - blobs := []coreda.Blob{[]byte("blob1"), []byte("blob2")} - expectedIDs := []coreda.ID{[]byte("id1"), []byte("id2")} - - mockAPI.On("SubmitWithOptions", ctx, blobs, gasPrice, encodedNamespace, testOptions).Return(expectedIDs, nil).Once() - - ids, err := client.DA.SubmitWithOptions(ctx, blobs, gasPrice, encodedNamespace, testOptions) - - require.NoError(t, err) - assert.Equal(t, expectedIDs, ids) - mockAPI.AssertExpectations(t) - }) - - t.Run("Single Blob Too Large", func(t *testing.T) { - mockAPI := mocks.NewMockDA(t) - client := createMockedClient(mockAPI) - - largerBlob := make([]byte, testMaxBlobSize+1) - blobs := []coreda.Blob{largerBlob, []byte("this blob is definitely too large")} - - _, err := client.DA.SubmitWithOptions(ctx, blobs, gasPrice, encodedNamespace, testOptions) - - require.Error(t, err) - mockAPI.AssertExpectations(t) - }) - - t.Run("Total Size Exceeded", func(t *testing.T) { - mockAPI := mocks.NewMockDA(t) - client := createMockedClient(mockAPI) - - blobsizes := make([]byte, testMaxBlobSize/3) - blobsizesOver := make([]byte, testMaxBlobSize) - - blobs := []coreda.Blob{blobsizes, blobsizes, blobsizesOver} - - ids, err := client.DA.SubmitWithOptions(ctx, blobs, gasPrice, encodedNamespace, testOptions) - - require.Error(t, err) - assert.ErrorIs(t, err, coreda.ErrBlobSizeOverLimit) - assert.Nil(t, ids) - - // Should not call internal RPC when validation fails - mockAPI.AssertNotCalled(t, "SubmitWithOptions", mock.Anything, mock.Anything, mock.Anything, mock.Anything, mock.Anything) - mockAPI.AssertExpectations(t) - }) - - t.Run("First Blob Too Large", func(t *testing.T) { - mockAPI := mocks.NewMockDA(t) - client := createMockedClient(mockAPI) - - largerBlob := make([]byte, testMaxBlobSize+1) - blobs := []coreda.Blob{largerBlob, []byte("small")} - - ids, err := client.DA.SubmitWithOptions(ctx, blobs, gasPrice, encodedNamespace, testOptions) - - require.Error(t, err) - assert.ErrorIs(t, err, coreda.ErrBlobSizeOverLimit) - assert.Nil(t, ids) - - mockAPI.AssertNotCalled(t, "SubmitWithOptions", mock.Anything, mock.Anything, mock.Anything, mock.Anything, mock.Anything) - mockAPI.AssertExpectations(t) - }) - - t.Run("Empty Input Blobs", func(t *testing.T) { - mockAPI := mocks.NewMockDA(t) - client := createMockedClient(mockAPI) - - var blobs []coreda.Blob - - ids, err := client.DA.SubmitWithOptions(ctx, blobs, gasPrice, encodedNamespace, testOptions) - - require.NoError(t, err) - assert.Empty(t, ids) - - mockAPI.AssertNotCalled(t, "SubmitWithOptions", mock.Anything, mock.Anything, mock.Anything, mock.Anything, mock.Anything) - mockAPI.AssertExpectations(t) - }) - - t.Run("Error During SubmitWithOptions RPC", func(t *testing.T) { - mockAPI := mocks.NewMockDA(t) - client := createMockedClient(mockAPI) - - blobs := []coreda.Blob{[]byte("blob1")} - expectedError := errors.New("rpc submit failed") - - mockAPI.On("SubmitWithOptions", ctx, blobs, gasPrice, encodedNamespace, testOptions).Return(nil, expectedError).Once() - - ids, err := client.DA.SubmitWithOptions(ctx, blobs, gasPrice, encodedNamespace, testOptions) - - require.Error(t, err) - assert.ErrorIs(t, err, expectedError) - assert.Nil(t, ids) - mockAPI.AssertExpectations(t) - }) -} diff --git a/docs/learn/specs/da.md b/docs/learn/specs/da.md index d9f5ce5da7..0d1229e85f 100644 --- a/docs/learn/specs/da.md +++ b/docs/learn/specs/da.md @@ -1,10 +1,10 @@ # DA -Evolve provides a generic [data availability interface][da-interface] for modular blockchains. Any DA that implements this interface can be used with Evolve. +Evolve uses Celestia as its data availability layer through the [data availability interface][da-interface]. ## Details -`Client` can connect via JSON-RPC transports using Evolve's [jsonrpc][jsonrpc] implementations. The connection can be configured using the following cli flags: +The Celestia DA client connects directly to a Celestia node using the blob API. The connection can be configured using the following cli flags: * `--rollkit.da.address`: url address of the DA service (default: "grpc://localhost:26650") * `--rollkit.da.auth_token`: authentication token of the DA service @@ -21,10 +21,10 @@ Each submission first encodes the headers or data using protobuf (the encoded da To make sure that the serialised blocks don't exceed the underlying DA's blob limits, it fetches the blob size limit by calling `Config` which returns the limit as `uint64` bytes, then includes serialised blocks until the limit is reached. If the limit is reached, it submits the partial set and returns the count of successfully submitted blocks as `SubmittedCount`. The caller should retry with the remaining blocks until all the blocks are submitted. If the first block itself is over the limit, it throws an error. -The `Submit` call may result in an error (`StatusError`) based on the underlying DA implementations on following scenarios: +The `Submit` call may result in an error (`StatusError`) in the following scenarios: -* the total blobs size exceeds the underlying DA's limits (includes empty blobs) -* the implementation specific failures, e.g., for [celestia-da-json-rpc][jsonrpc], invalid namespace, unable to create the commitment or proof, setting low gas price, etc, could return error. +* the total blobs size exceeds Celestia's blob size limits (includes empty blobs) +* Celestia-specific failures, e.g., invalid namespace, unable to create the commitment or proof, setting low gas price, etc. The retrieval process now supports both legacy single-namespace mode and separate namespace mode: @@ -42,7 +42,7 @@ The retrieval process now supports both legacy single-namespace mode and separat If there are no blocks available for a given DA height in any namespace, `StatusNotFound` is returned (which is not an error case). The retrieved blobs are converted back to headers and data, then combined into complete blocks for processing. -Both header/data submission and retrieval operations may be unsuccessful if the DA node and the DA blockchain that the DA implementation is using have failures. For example, failures such as, DA mempool is full, DA submit transaction is nonce clashing with other transaction from the DA submitter account, DA node is not synced, etc. +Both header/data submission and retrieval operations may be unsuccessful if the Celestia node or the Celestia network have failures. For example, mempool is full, transaction nonce conflicts, node is not synced, etc. ## Namespace Separation Benefits @@ -57,7 +57,4 @@ The separation of headers and data into different namespaces provides several ad [1] [da-interface][da-interface] -[2] [jsonrpc][jsonrpc] - [da-interface]: https://github.com/evstack/ev-node/blob/main/core/da/da.go#L11 -[jsonrpc]: https://github.com/evstack/ev-node/tree/main/da/jsonrpc diff --git a/tools/da-debug/README.md b/tools/da-debug/README.md deleted file mode 100644 index 00de6ec4f1..0000000000 --- a/tools/da-debug/README.md +++ /dev/null @@ -1,96 +0,0 @@ -# DA Debug Tool - -A professional debugging tool for querying and inspecting Data Availability (DA) layer data in ev-node. - -## Overview - -The `da-debug` tool provides a command-line interface to interact with DA layers for debugging purposes. It offers two main commands: `query` for inspecting specific DA heights and `search` for finding blobs containing specific blockchain heights. - -## Installation - -Install using `go install`: - -```bash -go install github.com/evstack/ev-node/tools/da-debug@main -``` - -After installation, the `da-debug` binary will be available in your `$GOPATH/bin` directory. - -## Commands - -### Query Command - -Query and decode blobs at a specific DA height and namespace. - -```bash -da-debug query [flags] -``` - -**Flags:** - -- `--filter-height uint`: Filter blobs by specific blockchain height (0 = no filter) - -**Examples:** - -```bash -# Basic query -da-debug query 100 "my-rollup" - -# Query with height filter (only show blobs containing height 50) -da-debug query 100 "my-rollup" --filter-height 50 - -# Query with hex namespace -da-debug query 500 "0x000000000000000000000000000000000000000000000000000000746573743031" -``` - -### Search Command - -Search through multiple DA heights to find blobs containing a specific blockchain height. - -```bash -da-debug search --target-height [flags] -``` - -**Flags:** - -- `--target-height uint`: Target blockchain height to search for (required) -- `--range uint`: Number of DA heights to search (default: 10) - -**Examples:** - -```bash -# Search for blockchain height 1000 starting from DA height 500 -da-debug search 500 "my-rollup" --target-height 1000 - -# Search with custom range of 20 DA heights -da-debug search 500 "my-rollup" --target-height 1000 --range 20 - -# Search with hex namespace -da-debug search 100 "0x000000000000000000000000000000000000000000000000000000746573743031" --target-height 50 --range 5 -``` - -## Global Flags - -All commands support these global flags: - - -- `--da-url string`: DA layer JSON-RPC URL (default: "http://localhost:7980") - -- `--auth-token string`: Authentication token for DA layer -- `--timeout duration`: Request timeout (default: 30s) -- `--verbose`: Enable verbose logging -- `--max-blob-size uint`: Maximum blob size in bytes (default: 1970176) -- `--gas-price float`: Gas price for DA operations (default: 0.0) -- `--gas-multiplier float`: Gas multiplier for DA operations (default: 1.0) -- `--no-color`: Disable colored output - -## Namespace Format - -Namespaces can be provided in two formats: - -1. **Hex String**: A 29-byte hex string (with or without `0x` prefix) - - Example: `0x000000000000000000000000000000000000000000000000000000746573743031` - -2. **String Identifier**: Any string that gets automatically converted to a valid namespace - - Example: `"my-app"` or `"test-namespace"` - - The string is hashed and converted to a valid version 0 namespace diff --git a/tools/da-debug/go.mod b/tools/da-debug/go.mod deleted file mode 100644 index e33b6fea35..0000000000 --- a/tools/da-debug/go.mod +++ /dev/null @@ -1,58 +0,0 @@ -module github.com/evstack/ev-node/tools/da-debug - -go 1.24.6 - -require ( - github.com/evstack/ev-node v1.0.0-beta.6 - github.com/evstack/ev-node/core v1.0.0-beta.4 - github.com/evstack/ev-node/da v1.0.0-beta.4 - github.com/rs/zerolog v1.34.0 - github.com/spf13/cobra v1.10.1 - google.golang.org/protobuf v1.36.9 -) - -require ( - github.com/celestiaorg/go-header v0.7.3 // indirect - github.com/decred/dcrd/dcrec/secp256k1/v4 v4.4.0 // indirect - github.com/filecoin-project/go-jsonrpc v0.9.0 // indirect - github.com/gogo/protobuf v1.3.2 // indirect - github.com/golang/groupcache v0.0.0-20241129210726-2c02b8208cf8 // indirect - github.com/google/uuid v1.6.0 // indirect - github.com/gorilla/websocket v1.5.3 // indirect - github.com/hashicorp/golang-lru/v2 v2.0.7 // indirect - github.com/inconshreveable/mousetrap v1.1.0 // indirect - github.com/ipfs/go-cid v0.5.0 // indirect - github.com/ipfs/go-log/v2 v2.8.0 // indirect - github.com/klauspost/cpuid/v2 v2.3.0 // indirect - github.com/libp2p/go-buffer-pool v0.1.0 // indirect - github.com/libp2p/go-libp2p v0.43.0 // indirect - github.com/libp2p/go-libp2p-pubsub v0.15.0 // indirect - github.com/libp2p/go-msgio v0.3.0 // indirect - github.com/mattn/go-colorable v0.1.13 // indirect - github.com/mattn/go-isatty v0.0.20 // indirect - github.com/minio/sha256-simd v1.0.1 // indirect - github.com/mr-tron/base58 v1.2.0 // indirect - github.com/multiformats/go-base32 v0.1.0 // indirect - github.com/multiformats/go-base36 v0.2.0 // indirect - github.com/multiformats/go-multiaddr v0.16.1 // indirect - github.com/multiformats/go-multiaddr-fmt v0.1.0 // indirect - github.com/multiformats/go-multibase v0.2.0 // indirect - github.com/multiformats/go-multicodec v0.9.2 // indirect - github.com/multiformats/go-multihash v0.2.3 // indirect - github.com/multiformats/go-multistream v0.6.1 // indirect - github.com/multiformats/go-varint v0.0.7 // indirect - github.com/spaolacci/murmur3 v1.1.0 // indirect - github.com/spf13/pflag v1.0.10 // indirect - go.opencensus.io v0.24.0 // indirect - go.uber.org/multierr v1.11.0 // indirect - go.uber.org/zap v1.27.0 // indirect - golang.org/x/crypto v0.42.0 // indirect - golang.org/x/exp v0.0.0-20250811191247-51f88131bc50 // indirect - golang.org/x/sys v0.36.0 // indirect - golang.org/x/xerrors v0.0.0-20240903120638-7835f813f4da // indirect - lukechampine.com/blake3 v1.4.1 // indirect -) - -replace github.com/evstack/ev-node/core => ../../core - -replace github.com/evstack/ev-node/da => ../../da diff --git a/tools/da-debug/go.sum b/tools/da-debug/go.sum deleted file mode 100644 index 257ecf743d..0000000000 --- a/tools/da-debug/go.sum +++ /dev/null @@ -1,418 +0,0 @@ -cloud.google.com/go v0.26.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= -github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU= -github.com/benbjohnson/clock v1.3.5 h1:VvXlSJBzZpA/zum6Sj74hxwYI2DIxRWuNIoXAzHZz5o= -github.com/benbjohnson/clock v1.3.5/go.mod h1:J11/hYXuz8f4ySSvYwY0FKfm+ezbsZBKZxNJlLklBHA= -github.com/beorn7/perks v1.0.1 h1:VlbKKnNfV8bJzeqoa4cOKqO6bYr3WgKZxO8Z16+hsOM= -github.com/beorn7/perks v1.0.1/go.mod h1:G2ZrVWU2WbWT9wwq4/hrbKbnv/1ERSJQ0ibhJ6rlkpw= -github.com/celestiaorg/go-header v0.7.3 h1:3+kIa+YXT789gPGRh3a55qmdYq3yTTBIqTyum26AvN0= -github.com/celestiaorg/go-header v0.7.3/go.mod h1:eX9iTSPthVEAlEDLux40ZT/olXPGhpxHd+mEzJeDhd0= -github.com/celestiaorg/go-square/v3 v3.0.1 h1:44xnE3AUiZn/3q/uJ0c20AezFS0lywFTGG2lE/9jYKA= -github.com/celestiaorg/go-square/v3 v3.0.1/go.mod h1:Xc4ubl/7pbn/STD7w8Bnk/X1/PG3vk0ycOPW6tMOPX4= -github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU= -github.com/cespare/xxhash/v2 v2.3.0 h1:UL815xU9SqsFlibzuggzjXhog7bL6oX9BbNZnL2UFvs= -github.com/cespare/xxhash/v2 v2.3.0/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= -github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw= -github.com/cncf/udpa/go v0.0.0-20191209042840-269d4d468f6f/go.mod h1:M8M6+tZqaGXZJjfX53e64911xZQV5JYwmTeXPW+k8Sc= -github.com/coreos/go-systemd/v22 v22.5.0/go.mod h1:Y58oyj3AT4RCenI/lSvhwexgC+NSVTIJ3seZv2GcEnc= -github.com/cpuguy83/go-md2man/v2 v2.0.6/go.mod h1:oOW0eioCTA6cOiMLiUPZOpcVxMig6NIQQ7OS05n1F4g= -github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= -github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= -github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc h1:U9qPSI2PIWSS1VwoXQT9A3Wy9MM3WgvqSxFWenqJduM= -github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= -github.com/davidlazar/go-crypto v0.0.0-20200604182044-b73af7476f6c h1:pFUpOrbxDR6AkioZ1ySsx5yxlDQZ8stG2b88gTPxgJU= -github.com/davidlazar/go-crypto v0.0.0-20200604182044-b73af7476f6c/go.mod h1:6UhI8N9EjYm1c2odKpFpAYeR8dsBeM7PtzQhRgxRr9U= -github.com/decred/dcrd/crypto/blake256 v1.1.0 h1:zPMNGQCm0g4QTY27fOCorQW7EryeQ/U0x++OzVrdms8= -github.com/decred/dcrd/crypto/blake256 v1.1.0/go.mod h1:2OfgNZ5wDpcsFmHmCK5gZTPcCXqlm2ArzUIkw9czNJo= -github.com/decred/dcrd/dcrec/secp256k1/v4 v4.4.0 h1:NMZiJj8QnKe1LgsbDayM4UoHwbvwDRwnI3hwNaAHRnc= -github.com/decred/dcrd/dcrec/secp256k1/v4 v4.4.0/go.mod h1:ZXNYxsqcloTdSy/rNShjYzMhyjf0LaoftYK0p+A3h40= -github.com/envoyproxy/go-control-plane v0.9.0/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= -github.com/envoyproxy/go-control-plane v0.9.1-0.20191026205805-5f8ba28d4473/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= -github.com/envoyproxy/go-control-plane v0.9.4/go.mod h1:6rpuAdCZL397s3pYoYcLgu1mIlRU8Am5FuJP05cCM98= -github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c= -github.com/evstack/ev-node v1.0.0-beta.6 h1:jjGWAUsjHDpuBjvM7KXnY6Y8uYHM8LOrn0hDrk5zE6E= -github.com/evstack/ev-node v1.0.0-beta.6/go.mod h1:ZABT4xTIg4bINUS08r8e8LFIUk5anWe799fZ320q+Mk= -github.com/filecoin-project/go-clock v0.1.0 h1:SFbYIM75M8NnFm1yMHhN9Ahy3W5bEZV9gd6MPfXbKVU= -github.com/filecoin-project/go-clock v0.1.0/go.mod h1:4uB/O4PvOjlx1VCMdZ9MyDZXRm//gkj1ELEbxfI1AZs= -github.com/filecoin-project/go-jsonrpc v0.9.0 h1:G47qEF52w7GholpI21vPSTVBFvsrip6geIoqNiqyZtQ= -github.com/filecoin-project/go-jsonrpc v0.9.0/go.mod h1:OG7kVBVh/AbDFHIwx7Kw0l9ARmKOS6gGOr0LbdBpbLc= -github.com/flynn/noise v1.1.0 h1:KjPQoQCEFdZDiP03phOvGi11+SVVhBG2wOWAorLsstg= -github.com/flynn/noise v1.1.0/go.mod h1:xbMo+0i6+IGbYdJhF31t2eR1BIU0CYc12+BNAKwUTag= -github.com/francoispqt/gojay v1.2.13 h1:d2m3sFjloqoIUQU3TsHBgj6qg/BVGlTBeHDUmyJnXKk= -github.com/francoispqt/gojay v1.2.13/go.mod h1:ehT5mTG4ua4581f1++1WLG0vPdaA9HaiDsoyrBGkyDY= -github.com/fsnotify/fsnotify v1.9.0 h1:2Ml+OJNzbYCTzsxtv8vKSFD9PbJjmhYF14k/jKC7S9k= -github.com/fsnotify/fsnotify v1.9.0/go.mod h1:8jBTzvmWwFyi3Pb8djgCCO5IBqzKJ/Jwo8TRcHyHii0= -github.com/go-kit/kit v0.13.0 h1:OoneCcHKHQ03LfBpoQCUfCluwd2Vt3ohz+kvbJneZAU= -github.com/go-kit/kit v0.13.0/go.mod h1:phqEHMMUbyrCFCTgH48JueqrM3md2HcAZ8N3XE4FKDg= -github.com/go-logr/logr v1.4.3 h1:CjnDlHq8ikf6E492q6eKboGOC0T8CDaOvkHCIg8idEI= -github.com/go-logr/logr v1.4.3/go.mod h1:9T104GzyrTigFIr8wt5mBrctHMim0Nb2HLGrmQ40KvY= -github.com/go-logr/stdr v1.2.2 h1:hSWxHoqTgW2S2qGc0LTAI563KZ5YKYRhT3MFKZMbjag= -github.com/go-logr/stdr v1.2.2/go.mod h1:mMo/vtBO5dYbehREoey6XUKy/eSumjCCveDpRre4VKE= -github.com/go-viper/mapstructure/v2 v2.4.0 h1:EBsztssimR/CONLSZZ04E8qAkxNYq4Qp9LvH92wZUgs= -github.com/go-viper/mapstructure/v2 v2.4.0/go.mod h1:oJDH3BJKyqBA2TXFhDsKDGDTlndYOZ6rGS0BRZIxGhM= -github.com/goccy/go-yaml v1.18.0 h1:8W7wMFS12Pcas7KU+VVkaiCng+kG8QiFeFwzFb+rwuw= -github.com/goccy/go-yaml v1.18.0/go.mod h1:XBurs7gK8ATbW4ZPGKgcbrY1Br56PdM69F7LkFRi1kA= -github.com/godbus/dbus/v5 v5.0.4/go.mod h1:xhWf0FNVPg57R7Z0UbKHbJfkEywrmjJnf7w5xrFpKfA= -github.com/gogo/protobuf v1.3.2 h1:Ov1cvc58UF3b5XjBnZv7+opcTcQFZebYjWzi34vdm4Q= -github.com/gogo/protobuf v1.3.2/go.mod h1:P1XiOD3dCwIKUDQYPy72D8LYyHL2YPYrpS2s69NZV8Q= -github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q= -github.com/golang/groupcache v0.0.0-20200121045136-8c9f03a8e57e/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= -github.com/golang/groupcache v0.0.0-20241129210726-2c02b8208cf8 h1:f+oWsMOmNPc8JmEHVZIycC7hBoQxHH9pNKQORJNozsQ= -github.com/golang/groupcache v0.0.0-20241129210726-2c02b8208cf8/go.mod h1:wcDNUvekVysuuOpQKo3191zZyTpiI6se1N1ULghS0sw= -github.com/golang/mock v1.1.1/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A= -github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= -github.com/golang/protobuf v1.3.2/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= -github.com/golang/protobuf v1.4.0-rc.1/go.mod h1:ceaxUfeHdC40wWswd/P6IGgMaK3YpKi5j83Wpe3EHw8= -github.com/golang/protobuf v1.4.0-rc.1.0.20200221234624-67d41d38c208/go.mod h1:xKAWHe0F5eneWXFV3EuXVDTCmh+JuBKY0li0aMyXATA= -github.com/golang/protobuf v1.4.0-rc.2/go.mod h1:LlEzMj4AhA7rCAGe4KMBDvJI+AwstrUpVNzEA03Pprs= -github.com/golang/protobuf v1.4.0-rc.4.0.20200313231945-b860323f09d0/go.mod h1:WU3c8KckQ9AFe+yFwt9sWVRKCVIyN9cPHBJSNnbL67w= -github.com/golang/protobuf v1.4.0/go.mod h1:jodUvKwWbYaEsadDk5Fwe5c77LiNKVO9IDvqG2KuDX0= -github.com/golang/protobuf v1.4.1/go.mod h1:U8fpvMrcmy5pZrNK1lt4xCsGvpyWQ/VVv6QDs8UjoX8= -github.com/golang/protobuf v1.4.3/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI= -github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M= -github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= -github.com/google/go-cmp v0.3.1/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= -github.com/google/go-cmp v0.4.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= -github.com/google/go-cmp v0.5.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= -github.com/google/go-cmp v0.5.3/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= -github.com/google/go-cmp v0.7.0 h1:wk8382ETsv4JYUZwIsn6YpYiWiBsYLSJiTsyBybVuN8= -github.com/google/go-cmp v0.7.0/go.mod h1:pXiqmnSA92OHEEa9HXL2W4E7lf9JzCmGVUdgjX3N/iU= -github.com/google/gopacket v1.1.19 h1:ves8RnFZPGiFnTS0uPQStjwru6uO6h+nlr9j6fL7kF8= -github.com/google/gopacket v1.1.19/go.mod h1:iJ8V8n6KS+z2U1A8pUwu8bW5SyEMkXJB8Yo/Vo+TKTo= -github.com/google/uuid v1.1.2/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= -github.com/google/uuid v1.6.0 h1:NIvaJDMOsjHA8n1jAhLSgzrAzy1Hgr+hNrb57e+94F0= -github.com/google/uuid v1.6.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= -github.com/gorilla/websocket v1.5.3 h1:saDtZ6Pbx/0u+bgYQ3q96pZgCzfhKXGPqt7kZ72aNNg= -github.com/gorilla/websocket v1.5.3/go.mod h1:YR8l580nyteQvAITg2hZ9XVh4b55+EU/adAjf1fMHhE= -github.com/hashicorp/golang-lru v1.0.2 h1:dV3g9Z/unq5DpblPpw+Oqcv4dU/1omnb4Ok8iPY6p1c= -github.com/hashicorp/golang-lru v1.0.2/go.mod h1:iADmTwqILo4mZ8BN3D2Q6+9jd8WM5uGBxy+E8yxSoD4= -github.com/hashicorp/golang-lru/v2 v2.0.7 h1:a+bsQ5rvGLjzHuww6tVxozPZFVghXaHOwFs4luLUK2k= -github.com/hashicorp/golang-lru/v2 v2.0.7/go.mod h1:QeFd9opnmA6QUJc5vARoKUSoFhyfM2/ZepoAG6RGpeM= -github.com/huin/goupnp v1.3.0 h1:UvLUlWDNpoUdYzb2TCn+MuTWtcjXKSza2n6CBdQ0xXc= -github.com/huin/goupnp v1.3.0/go.mod h1:gnGPsThkYa7bFi/KWmEysQRf48l2dvR5bxr2OFckNX8= -github.com/inconshreveable/mousetrap v1.1.0 h1:wN+x4NVGpMsO7ErUn/mUI3vEoE6Jt13X2s0bqwp9tc8= -github.com/inconshreveable/mousetrap v1.1.0/go.mod h1:vpF70FUmC8bwa3OWnCshd2FqLfsEA9PFc4w1p2J65bw= -github.com/ipfs/boxo v0.33.1 h1:89m+ksw+cYi0ecTNTJ71IRS5ZrLiovmO6XWHIOGhAEg= -github.com/ipfs/boxo v0.33.1/go.mod h1:KwlJTzv5fb1GLlA9KyMqHQmvP+4mrFuiE3PnjdrPJHs= -github.com/ipfs/go-cid v0.5.0 h1:goEKKhaGm0ul11IHA7I6p1GmKz8kEYniqFopaB5Otwg= -github.com/ipfs/go-cid v0.5.0/go.mod h1:0L7vmeNXpQpUS9vt+yEARkJ8rOg43DF3iPgn4GIN0mk= -github.com/ipfs/go-datastore v0.9.0 h1:WocriPOayqalEsueHv6SdD4nPVl4rYMfYGLD4bqCZ+w= -github.com/ipfs/go-datastore v0.9.0/go.mod h1:uT77w/XEGrvJWwHgdrMr8bqCN6ZTW9gzmi+3uK+ouHg= -github.com/ipfs/go-log/v2 v2.8.0 h1:SptNTPJQV3s5EF4FdrTu/yVdOKfGbDgn1EBZx4til2o= -github.com/ipfs/go-log/v2 v2.8.0/go.mod h1:2LEEhdv8BGubPeSFTyzbqhCqrwqxCbuTNTLWqgNAipo= -github.com/ipld/go-ipld-prime v0.21.0 h1:n4JmcpOlPDIxBcY037SVfpd1G+Sj1nKZah0m6QH9C2E= -github.com/ipld/go-ipld-prime v0.21.0/go.mod h1:3RLqy//ERg/y5oShXXdx5YIp50cFGOanyMctpPjsvxQ= -github.com/jackpal/go-nat-pmp v1.0.2 h1:KzKSgb7qkJvOUTqYl9/Hg/me3pWgBmERKrTGD7BdWus= -github.com/jackpal/go-nat-pmp v1.0.2/go.mod h1:QPH045xvCAeXUZOxsnwmrtiCoxIr9eob+4orBN1SBKc= -github.com/jbenet/go-temp-err-catcher v0.1.0 h1:zpb3ZH6wIE8Shj2sKS+khgRvf7T7RABoLk/+KKHggpk= -github.com/jbenet/go-temp-err-catcher v0.1.0/go.mod h1:0kJRvmDZXNMIiJirNPEYfhpPwbGVtZVWC34vc5WLsDk= -github.com/kisielk/errcheck v1.5.0/go.mod h1:pFxgyoBC7bSaBwPgfKdkLd5X25qrDl4LWUI2bnpBCr8= -github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck= -github.com/klauspost/compress v1.18.0 h1:c/Cqfb0r+Yi+JtIEq73FWXVkRonBlf0CRNYc8Zttxdo= -github.com/klauspost/compress v1.18.0/go.mod h1:2Pp+KzxcywXVXMr50+X0Q/Lsb43OQHYWRCY2AiWywWQ= -github.com/klauspost/cpuid/v2 v2.3.0 h1:S4CRMLnYUhGeDFDqkGriYKdfoFlDnMtqTiI/sFzhA9Y= -github.com/klauspost/cpuid/v2 v2.3.0/go.mod h1:hqwkgyIinND0mEev00jJYCxPNVRVXFQeu1XKlok6oO0= -github.com/koron/go-ssdp v0.0.6 h1:Jb0h04599eq/CY7rB5YEqPS83HmRfHP2azkxMN2rFtU= -github.com/koron/go-ssdp v0.0.6/go.mod h1:0R9LfRJGek1zWTjN3JUNlm5INCDYGpRDfAptnct63fI= -github.com/libp2p/go-buffer-pool v0.1.0 h1:oK4mSFcQz7cTQIfqbe4MIj9gLW+mnanjyFtc6cdF0Y8= -github.com/libp2p/go-buffer-pool v0.1.0/go.mod h1:N+vh8gMqimBzdKkSMVuydVDq+UV5QTWy5HSiZacSbPg= -github.com/libp2p/go-cidranger v1.1.0 h1:ewPN8EZ0dd1LSnrtuwd4709PXVcITVeuwbag38yPW7c= -github.com/libp2p/go-cidranger v1.1.0/go.mod h1:KWZTfSr+r9qEo9OkI9/SIEeAtw+NNoU0dXIXt15Okic= -github.com/libp2p/go-flow-metrics v0.3.0 h1:q31zcHUvHnwDO0SHaukewPYgwOBSxtt830uJtUx6784= -github.com/libp2p/go-flow-metrics v0.3.0/go.mod h1:nuhlreIwEguM1IvHAew3ij7A8BMlyHQJ279ao24eZZo= -github.com/libp2p/go-libp2p v0.43.0 h1:b2bg2cRNmY4HpLK8VHYQXLX2d3iND95OjodLFymvqXU= -github.com/libp2p/go-libp2p v0.43.0/go.mod h1:IiSqAXDyP2sWH+J2gs43pNmB/y4FOi2XQPbsb+8qvzc= -github.com/libp2p/go-libp2p-asn-util v0.4.1 h1:xqL7++IKD9TBFMgnLPZR6/6iYhawHKHl950SO9L6n94= -github.com/libp2p/go-libp2p-asn-util v0.4.1/go.mod h1:d/NI6XZ9qxw67b4e+NgpQexCIiFYJjErASrYW4PFDN8= -github.com/libp2p/go-libp2p-kad-dht v0.34.0 h1:yvJ/Vrt36GVjsqPxiGcuuwOloKuZLV9Aa7awIKyNXy0= -github.com/libp2p/go-libp2p-kad-dht v0.34.0/go.mod h1:JNbkES4W5tajS6uYivw6MPs0842cPHAwhgaPw8sQG4o= -github.com/libp2p/go-libp2p-kbucket v0.7.0 h1:vYDvRjkyJPeWunQXqcW2Z6E93Ywx7fX0jgzb/dGOKCs= -github.com/libp2p/go-libp2p-kbucket v0.7.0/go.mod h1:blOINGIj1yiPYlVEX0Rj9QwEkmVnz3EP8LK1dRKBC6g= -github.com/libp2p/go-libp2p-pubsub v0.15.0 h1:cG7Cng2BT82WttmPFMi50gDNV+58K626m/wR00vGL1o= -github.com/libp2p/go-libp2p-pubsub v0.15.0/go.mod h1:lr4oE8bFgQaifRcoc2uWhWWiK6tPdOEKpUuR408GFN4= -github.com/libp2p/go-libp2p-record v0.3.1 h1:cly48Xi5GjNw5Wq+7gmjfBiG9HCzQVkiZOUZ8kUl+Fg= -github.com/libp2p/go-libp2p-record v0.3.1/go.mod h1:T8itUkLcWQLCYMqtX7Th6r7SexyUJpIyPgks757td/E= -github.com/libp2p/go-libp2p-routing-helpers v0.7.5 h1:HdwZj9NKovMx0vqq6YNPTh6aaNzey5zHD7HeLJtq6fI= -github.com/libp2p/go-libp2p-routing-helpers v0.7.5/go.mod h1:3YaxrwP0OBPDD7my3D0KxfR89FlcX/IEbxDEDfAmj98= -github.com/libp2p/go-libp2p-testing v0.12.0 h1:EPvBb4kKMWO29qP4mZGyhVzUyR25dvfUIK5WDu6iPUA= -github.com/libp2p/go-libp2p-testing v0.12.0/go.mod h1:KcGDRXyN7sQCllucn1cOOS+Dmm7ujhfEyXQL5lvkcPg= -github.com/libp2p/go-msgio v0.3.0 h1:mf3Z8B1xcFN314sWX+2vOTShIE0Mmn2TXn3YCUQGNj0= -github.com/libp2p/go-msgio v0.3.0/go.mod h1:nyRM819GmVaF9LX3l03RMh10QdOroF++NBbxAb0mmDM= -github.com/libp2p/go-netroute v0.2.2 h1:Dejd8cQ47Qx2kRABg6lPwknU7+nBnFRpko45/fFPuZ8= -github.com/libp2p/go-netroute v0.2.2/go.mod h1:Rntq6jUAH0l9Gg17w5bFGhcC9a+vk4KNXs6s7IljKYE= -github.com/libp2p/go-reuseport v0.4.0 h1:nR5KU7hD0WxXCJbmw7r2rhRYruNRl2koHw8fQscQm2s= -github.com/libp2p/go-reuseport v0.4.0/go.mod h1:ZtI03j/wO5hZVDFo2jKywN6bYKWLOy8Se6DrI2E1cLU= -github.com/libp2p/go-yamux/v5 v5.0.1 h1:f0WoX/bEF2E8SbE4c/k1Mo+/9z0O4oC/hWEA+nfYRSg= -github.com/libp2p/go-yamux/v5 v5.0.1/go.mod h1:en+3cdX51U0ZslwRdRLrvQsdayFt3TSUKvBGErzpWbU= -github.com/marten-seemann/tcp v0.0.0-20210406111302-dfbc87cc63fd h1:br0buuQ854V8u83wA0rVZ8ttrq5CpaPZdvrK0LP2lOk= -github.com/marten-seemann/tcp v0.0.0-20210406111302-dfbc87cc63fd/go.mod h1:QuCEs1Nt24+FYQEqAAncTDPJIuGs+LxK1MCiFL25pMU= -github.com/mattn/go-colorable v0.1.13 h1:fFA4WZxdEF4tXPZVKMLwD8oUnCTTo08duU7wxecdEvA= -github.com/mattn/go-colorable v0.1.13/go.mod h1:7S9/ev0klgBDR4GtXTXX8a3vIGJpMovkB8vQcUbaXHg= -github.com/mattn/go-isatty v0.0.16/go.mod h1:kYGgaQfpe5nmfYZH+SKPsOc2e4SrIfOl2e/yFXSvRLM= -github.com/mattn/go-isatty v0.0.19/go.mod h1:W+V8PltTTMOvKvAeJH7IuucS94S2C6jfK/D7dTCTo3Y= -github.com/mattn/go-isatty v0.0.20 h1:xfD0iDuEKnDkl03q4limB+vH+GxLEtL/jb4xVJSWWEY= -github.com/mattn/go-isatty v0.0.20/go.mod h1:W+V8PltTTMOvKvAeJH7IuucS94S2C6jfK/D7dTCTo3Y= -github.com/miekg/dns v1.1.68 h1:jsSRkNozw7G/mnmXULynzMNIsgY2dHC8LO6U6Ij2JEA= -github.com/miekg/dns v1.1.68/go.mod h1:fujopn7TB3Pu3JM69XaawiU0wqjpL9/8xGop5UrTPps= -github.com/mikioh/tcpinfo v0.0.0-20190314235526-30a79bb1804b h1:z78hV3sbSMAUoyUMM0I83AUIT6Hu17AWfgjzIbtrYFc= -github.com/mikioh/tcpinfo v0.0.0-20190314235526-30a79bb1804b/go.mod h1:lxPUiZwKoFL8DUUmalo2yJJUCxbPKtm8OKfqr2/FTNU= -github.com/mikioh/tcpopt v0.0.0-20190314235656-172688c1accc h1:PTfri+PuQmWDqERdnNMiD9ZejrlswWrCpBEZgWOiTrc= -github.com/mikioh/tcpopt v0.0.0-20190314235656-172688c1accc/go.mod h1:cGKTAVKx4SxOuR/czcZ/E2RSJ3sfHs8FpHhQ5CWMf9s= -github.com/minio/blake2b-simd v0.0.0-20160723061019-3f5f724cb5b1/go.mod h1:pD8RvIylQ358TN4wwqatJ8rNavkEINozVn9DtGI3dfQ= -github.com/minio/sha256-simd v0.1.1-0.20190913151208-6de447530771/go.mod h1:B5e1o+1/KgNmWrSQK08Y6Z1Vb5pwIktudl0J58iy0KM= -github.com/minio/sha256-simd v1.0.1 h1:6kaan5IFmwTNynnKKpDHe6FWHohJOHhCPchzK49dzMM= -github.com/minio/sha256-simd v1.0.1/go.mod h1:Pz6AKMiUdngCLpeTL/RJY1M9rUuPMYujV5xJjtbRSN8= -github.com/mitchellh/mapstructure v1.5.0 h1:jeMsZIYE/09sWLaz43PL7Gy6RuMjD2eJVyuac5Z2hdY= -github.com/mitchellh/mapstructure v1.5.0/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo= -github.com/mr-tron/base58 v1.1.2/go.mod h1:BinMc/sQntlIE1frQmRFPUoPA1Zkr8VRgBdjWI2mNwc= -github.com/mr-tron/base58 v1.2.0 h1:T/HDJBh4ZCPbU39/+c3rRvE0uKBQlU27+QI8LJ4t64o= -github.com/mr-tron/base58 v1.2.0/go.mod h1:BinMc/sQntlIE1frQmRFPUoPA1Zkr8VRgBdjWI2mNwc= -github.com/multiformats/go-base32 v0.1.0 h1:pVx9xoSPqEIQG8o+UbAe7DNi51oej1NtK+aGkbLYxPE= -github.com/multiformats/go-base32 v0.1.0/go.mod h1:Kj3tFY6zNr+ABYMqeUNeGvkIC/UYgtWibDcT0rExnbI= -github.com/multiformats/go-base36 v0.2.0 h1:lFsAbNOGeKtuKozrtBsAkSVhv1p9D0/qedU9rQyccr0= -github.com/multiformats/go-base36 v0.2.0/go.mod h1:qvnKE++v+2MWCfePClUEjE78Z7P2a1UV0xHgWc0hkp4= -github.com/multiformats/go-multiaddr v0.1.1/go.mod h1:aMKBKNEYmzmDmxfX88/vz+J5IU55txyt0p4aiWVohjo= -github.com/multiformats/go-multiaddr v0.16.1 h1:fgJ0Pitow+wWXzN9do+1b8Pyjmo8m5WhGfzpL82MpCw= -github.com/multiformats/go-multiaddr v0.16.1/go.mod h1:JSVUmXDjsVFiW7RjIFMP7+Ev+h1DTbiJgVeTV/tcmP0= -github.com/multiformats/go-multiaddr-dns v0.4.1 h1:whi/uCLbDS3mSEUMb1MsoT4uzUeZB0N32yzufqS0i5M= -github.com/multiformats/go-multiaddr-dns v0.4.1/go.mod h1:7hfthtB4E4pQwirrz+J0CcDUfbWzTqEzVyYKKIKpgkc= -github.com/multiformats/go-multiaddr-fmt v0.1.0 h1:WLEFClPycPkp4fnIzoFoV9FVd49/eQsuaL3/CWe167E= -github.com/multiformats/go-multiaddr-fmt v0.1.0/go.mod h1:hGtDIW4PU4BqJ50gW2quDuPVjyWNZxToGUh/HwTZYJo= -github.com/multiformats/go-multibase v0.2.0 h1:isdYCVLvksgWlMW9OZRYJEa9pZETFivncJHmHnnd87g= -github.com/multiformats/go-multibase v0.2.0/go.mod h1:bFBZX4lKCA/2lyOFSAoKH5SS6oPyjtnzK/XTFDPkNuk= -github.com/multiformats/go-multicodec v0.9.2 h1:YrlXCuqxjqm3bXl+vBq5LKz5pz4mvAsugdqy78k0pXQ= -github.com/multiformats/go-multicodec v0.9.2/go.mod h1:LLWNMtyV5ithSBUo3vFIMaeDy+h3EbkMTek1m+Fybbo= -github.com/multiformats/go-multihash v0.0.8/go.mod h1:YSLudS+Pi8NHE7o6tb3D8vrpKa63epEDmG8nTduyAew= -github.com/multiformats/go-multihash v0.2.3 h1:7Lyc8XfX/IY2jWb/gI7JP+o7JEq9hOa7BFvVU9RSh+U= -github.com/multiformats/go-multihash v0.2.3/go.mod h1:dXgKXCXjBzdscBLk9JkjINiEsCKRVch90MdaGiKsvSM= -github.com/multiformats/go-multistream v0.6.1 h1:4aoX5v6T+yWmc2raBHsTvzmFhOI8WVOer28DeBBEYdQ= -github.com/multiformats/go-multistream v0.6.1/go.mod h1:ksQf6kqHAb6zIsyw7Zm+gAuVo57Qbq84E27YlYqavqw= -github.com/multiformats/go-varint v0.0.7 h1:sWSGR+f/eu5ABZA2ZpYKBILXTTs9JWpdEM/nEGOHFS8= -github.com/multiformats/go-varint v0.0.7/go.mod h1:r8PUYw/fD/SjBCiKOoDlGF6QawOELpZAu9eioSos/OU= -github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 h1:C3w9PqII01/Oq1c1nUAm88MOHcQC9l5mIlSMApZMrHA= -github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822/go.mod h1:+n7T8mK8HuQTcFwEeznm/DIxMOiR9yIdICNftLE1DvQ= -github.com/pbnjay/memory v0.0.0-20210728143218-7b4eea64cf58 h1:onHthvaw9LFnH4t2DcNVpwGmV9E1BkGknEliJkfwQj0= -github.com/pbnjay/memory v0.0.0-20210728143218-7b4eea64cf58/go.mod h1:DXv8WO4yhMYhSNPKjeNKa5WY9YCIEBRbNzFFPJbWO6Y= -github.com/pelletier/go-toml/v2 v2.2.4 h1:mye9XuhQ6gvn5h28+VilKrrPoQVanw5PMw/TB0t5Ec4= -github.com/pelletier/go-toml/v2 v2.2.4/go.mod h1:2gIqNv+qfxSVS7cM2xJQKtLSTLUE9V8t9Stt+h56mCY= -github.com/pion/datachannel v1.5.10 h1:ly0Q26K1i6ZkGf42W7D4hQYR90pZwzFOjTq5AuCKk4o= -github.com/pion/datachannel v1.5.10/go.mod h1:p/jJfC9arb29W7WrxyKbepTU20CFgyx5oLo8Rs4Py/M= -github.com/pion/dtls/v2 v2.2.12 h1:KP7H5/c1EiVAAKUmXyCzPiQe5+bCJrpOeKg/L05dunk= -github.com/pion/dtls/v2 v2.2.12/go.mod h1:d9SYc9fch0CqK90mRk1dC7AkzzpwJj6u2GU3u+9pqFE= -github.com/pion/dtls/v3 v3.0.6 h1:7Hkd8WhAJNbRgq9RgdNh1aaWlZlGpYTzdqjy9x9sK2E= -github.com/pion/dtls/v3 v3.0.6/go.mod h1:iJxNQ3Uhn1NZWOMWlLxEEHAN5yX7GyPvvKw04v9bzYU= -github.com/pion/ice/v4 v4.0.10 h1:P59w1iauC/wPk9PdY8Vjl4fOFL5B+USq1+xbDcN6gT4= -github.com/pion/ice/v4 v4.0.10/go.mod h1:y3M18aPhIxLlcO/4dn9X8LzLLSma84cx6emMSu14FGw= -github.com/pion/interceptor v0.1.40 h1:e0BjnPcGpr2CFQgKhrQisBU7V3GXK6wrfYrGYaU6Jq4= -github.com/pion/interceptor v0.1.40/go.mod h1:Z6kqH7M/FYirg3frjGJ21VLSRJGBXB/KqaTIrdqnOic= -github.com/pion/logging v0.2.3 h1:gHuf0zpoh1GW67Nr6Gj4cv5Z9ZscU7g/EaoC/Ke/igI= -github.com/pion/logging v0.2.3/go.mod h1:z8YfknkquMe1csOrxK5kc+5/ZPAzMxbKLX5aXpbpC90= -github.com/pion/mdns v0.0.12 h1:CiMYlY+O0azojWDmxdNr7ADGrnZ+V6Ilfner+6mSVK8= -github.com/pion/mdns/v2 v2.0.7 h1:c9kM8ewCgjslaAmicYMFQIde2H9/lrZpjBkN8VwoVtM= -github.com/pion/mdns/v2 v2.0.7/go.mod h1:vAdSYNAT0Jy3Ru0zl2YiW3Rm/fJCwIeM0nToenfOJKA= -github.com/pion/randutil v0.1.0 h1:CFG1UdESneORglEsnimhUjf33Rwjubwj6xfiOXBa3mA= -github.com/pion/randutil v0.1.0/go.mod h1:XcJrSMMbbMRhASFVOlj/5hQial/Y8oH/HVo7TBZq+j8= -github.com/pion/rtcp v1.2.15 h1:LZQi2JbdipLOj4eBjK4wlVoQWfrZbh3Q6eHtWtJBZBo= -github.com/pion/rtcp v1.2.15/go.mod h1:jlGuAjHMEXwMUHK78RgX0UmEJFV4zUKOFHR7OP+D3D0= -github.com/pion/rtp v1.8.19 h1:jhdO/3XhL/aKm/wARFVmvTfq0lC/CvN1xwYKmduly3c= -github.com/pion/rtp v1.8.19/go.mod h1:bAu2UFKScgzyFqvUKmbvzSdPr+NGbZtv6UB2hesqXBk= -github.com/pion/sctp v1.8.39 h1:PJma40vRHa3UTO3C4MyeJDQ+KIobVYRZQZ0Nt7SjQnE= -github.com/pion/sctp v1.8.39/go.mod h1:cNiLdchXra8fHQwmIoqw0MbLLMs+f7uQ+dGMG2gWebE= -github.com/pion/sdp/v3 v3.0.13 h1:uN3SS2b+QDZnWXgdr69SM8KB4EbcnPnPf2Laxhty/l4= -github.com/pion/sdp/v3 v3.0.13/go.mod h1:88GMahN5xnScv1hIMTqLdu/cOcUkj6a9ytbncwMCq2E= -github.com/pion/srtp/v3 v3.0.6 h1:E2gyj1f5X10sB/qILUGIkL4C2CqK269Xq167PbGCc/4= -github.com/pion/srtp/v3 v3.0.6/go.mod h1:BxvziG3v/armJHAaJ87euvkhHqWe9I7iiOy50K2QkhY= -github.com/pion/stun v0.6.1 h1:8lp6YejULeHBF8NmV8e2787BogQhduZugh5PdhDyyN4= -github.com/pion/stun v0.6.1/go.mod h1:/hO7APkX4hZKu/D0f2lHzNyvdkTGtIy3NDmLR7kSz/8= -github.com/pion/stun/v3 v3.0.0 h1:4h1gwhWLWuZWOJIJR9s2ferRO+W3zA/b6ijOI6mKzUw= -github.com/pion/stun/v3 v3.0.0/go.mod h1:HvCN8txt8mwi4FBvS3EmDghW6aQJ24T+y+1TKjB5jyU= -github.com/pion/transport/v2 v2.2.10 h1:ucLBLE8nuxiHfvkFKnkDQRYWYfp8ejf4YBOPfaQpw6Q= -github.com/pion/transport/v2 v2.2.10/go.mod h1:sq1kSLWs+cHW9E+2fJP95QudkzbK7wscs8yYgQToO5E= -github.com/pion/transport/v3 v3.0.7 h1:iRbMH05BzSNwhILHoBoAPxoB9xQgOaJk+591KC9P1o0= -github.com/pion/transport/v3 v3.0.7/go.mod h1:YleKiTZ4vqNxVwh77Z0zytYi7rXHl7j6uPLGhhz9rwo= -github.com/pion/turn/v4 v4.0.2 h1:ZqgQ3+MjP32ug30xAbD6Mn+/K4Sxi3SdNOTFf+7mpps= -github.com/pion/turn/v4 v4.0.2/go.mod h1:pMMKP/ieNAG/fN5cZiN4SDuyKsXtNTr0ccN7IToA1zs= -github.com/pion/webrtc/v4 v4.1.2 h1:mpuUo/EJ1zMNKGE79fAdYNFZBX790KE7kQQpLMjjR54= -github.com/pion/webrtc/v4 v4.1.2/go.mod h1:xsCXiNAmMEjIdFxAYU0MbB3RwRieJsegSB2JZsGN+8U= -github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= -github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= -github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 h1:Jamvg5psRIccs7FGNTlIRMkT8wgtp5eCXdBlqhYGL6U= -github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= -github.com/polydawn/refmt v0.89.0 h1:ADJTApkvkeBZsN0tBTx8QjpD9JkmxbKp0cxfr9qszm4= -github.com/polydawn/refmt v0.89.0/go.mod h1:/zvteZs/GwLtCgZ4BL6CBsk9IKIlexP43ObX9AxTqTw= -github.com/prometheus/client_golang v1.23.2 h1:Je96obch5RDVy3FDMndoUsjAhG5Edi49h0RJWRi/o0o= -github.com/prometheus/client_golang v1.23.2/go.mod h1:Tb1a6LWHB3/SPIzCoaDXI4I8UHKeFTEQ1YCr+0Gyqmg= -github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= -github.com/prometheus/client_model v0.6.2 h1:oBsgwpGs7iVziMvrGhE53c/GrLUsZdHnqNwqPLxwZyk= -github.com/prometheus/client_model v0.6.2/go.mod h1:y3m2F6Gdpfy6Ut/GBsUqTWZqCUvMVzSfMLjcu6wAwpE= -github.com/prometheus/common v0.66.1 h1:h5E0h5/Y8niHc5DlaLlWLArTQI7tMrsfQjHV+d9ZoGs= -github.com/prometheus/common v0.66.1/go.mod h1:gcaUsgf3KfRSwHY4dIMXLPV0K/Wg1oZ8+SbZk/HH/dA= -github.com/prometheus/procfs v0.17.0 h1:FuLQ+05u4ZI+SS/w9+BWEM2TXiHKsUQ9TADiRH7DuK0= -github.com/prometheus/procfs v0.17.0/go.mod h1:oPQLaDAMRbA+u8H5Pbfq+dl3VDAvHxMUOVhe0wYB2zw= -github.com/quic-go/qpack v0.5.1 h1:giqksBPnT/HDtZ6VhtFKgoLOWmlyo9Ei6u9PqzIMbhI= -github.com/quic-go/qpack v0.5.1/go.mod h1:+PC4XFrEskIVkcLzpEkbLqq1uCoxPhQuvK5rH1ZgaEg= -github.com/quic-go/quic-go v0.54.0 h1:6s1YB9QotYI6Ospeiguknbp2Znb/jZYjZLRXn9kMQBg= -github.com/quic-go/quic-go v0.54.0/go.mod h1:e68ZEaCdyviluZmy44P6Iey98v/Wfz6HCjQEm+l8zTY= -github.com/quic-go/webtransport-go v0.9.0 h1:jgys+7/wm6JarGDrW+lD/r9BGqBAmqY/ssklE09bA70= -github.com/quic-go/webtransport-go v0.9.0/go.mod h1:4FUYIiUc75XSsF6HShcLeXXYZJ9AGwo/xh3L8M/P1ao= -github.com/rs/xid v1.6.0/go.mod h1:7XoLgs4eV+QndskICGsho+ADou8ySMSjJKDIan90Nz0= -github.com/rs/zerolog v1.34.0 h1:k43nTLIwcTVQAncfCw4KZ2VY6ukYoZaBPNOE8txlOeY= -github.com/rs/zerolog v1.34.0/go.mod h1:bJsvje4Z08ROH4Nhs5iH600c3IkWhwp44iRc54W6wYQ= -github.com/russross/blackfriday/v2 v2.1.0/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM= -github.com/sagikazarmark/locafero v0.11.0 h1:1iurJgmM9G3PA/I+wWYIOw/5SyBtxapeHDcg+AAIFXc= -github.com/sagikazarmark/locafero v0.11.0/go.mod h1:nVIGvgyzw595SUSUE6tvCp3YYTeHs15MvlmU87WwIik= -github.com/sourcegraph/conc v0.3.1-0.20240121214520-5f936abd7ae8 h1:+jumHNA0Wrelhe64i8F6HNlS8pkoyMv5sreGx2Ry5Rw= -github.com/sourcegraph/conc v0.3.1-0.20240121214520-5f936abd7ae8/go.mod h1:3n1Cwaq1E1/1lhQhtRK2ts/ZwZEhjcQeJQ1RuC6Q/8U= -github.com/spaolacci/murmur3 v1.1.0 h1:7c1g84S4BPRrfL5Xrdp6fOJ206sU9y293DDHaoy0bLI= -github.com/spaolacci/murmur3 v1.1.0/go.mod h1:JwIasOWyU6f++ZhiEuf87xNszmSA2myDM2Kzu9HwQUA= -github.com/spf13/afero v1.15.0 h1:b/YBCLWAJdFWJTN9cLhiXXcD7mzKn9Dm86dNnfyQw1I= -github.com/spf13/afero v1.15.0/go.mod h1:NC2ByUVxtQs4b3sIUphxK0NioZnmxgyCrfzeuq8lxMg= -github.com/spf13/cast v1.10.0 h1:h2x0u2shc1QuLHfxi+cTJvs30+ZAHOGRic8uyGTDWxY= -github.com/spf13/cast v1.10.0/go.mod h1:jNfB8QC9IA6ZuY2ZjDp0KtFO2LZZlg4S/7bzP6qqeHo= -github.com/spf13/cobra v1.10.1 h1:lJeBwCfmrnXthfAupyUTzJ/J4Nc1RsHC/mSRU2dll/s= -github.com/spf13/cobra v1.10.1/go.mod h1:7SmJGaTHFVBY0jW4NXGluQoLvhqFQM+6XSKD+P4XaB0= -github.com/spf13/pflag v1.0.9/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg= -github.com/spf13/pflag v1.0.10 h1:4EBh2KAYBwaONj6b2Ye1GiHfwjqyROoF4RwYO+vPwFk= -github.com/spf13/pflag v1.0.10/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg= -github.com/spf13/viper v1.21.0 h1:x5S+0EU27Lbphp4UKm1C+1oQO+rKx36vfCoaVebLFSU= -github.com/spf13/viper v1.21.0/go.mod h1:P0lhsswPGWD/1lZJ9ny3fYnVqxiegrlNrEmgLjbTCAY= -github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= -github.com/stretchr/objx v0.4.0/go.mod h1:YvHI0jy2hoMjB+UWwv71VJQ9isScKT/TqJzVSSt89Yw= -github.com/stretchr/objx v0.5.0/go.mod h1:Yh+to48EsGEfYuaHDzXPcE3xhTkx73EhmCGUpEOglKo= -github.com/stretchr/objx v0.5.2 h1:xuMeJ0Sdp5ZMRXx/aWO6RZxdr3beISkG5/G/aIRr3pY= -github.com/stretchr/objx v0.5.2/go.mod h1:FRsXN1f5AsAjCGJKqEizvkpNtU+EGNCLh3NxZ/8L+MA= -github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= -github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU= -github.com/stretchr/testify v1.8.1/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4= -github.com/stretchr/testify v1.11.1 h1:7s2iGBzp5EwR7/aIZr8ao5+dra3wiQyKjjFuvgVKu7U= -github.com/stretchr/testify v1.11.1/go.mod h1:wZwfW3scLgRK+23gO65QZefKpKQRnfz6sD981Nm4B6U= -github.com/subosito/gotenv v1.6.0 h1:9NlTDc1FTs4qu0DDq7AEtTPNw6SVm7uBMsUCUjABIf8= -github.com/subosito/gotenv v1.6.0/go.mod h1:Dk4QP5c2W3ibzajGcXpNraDfq2IrhjMIvMSWPKKo0FU= -github.com/whyrusleeping/go-keyspace v0.0.0-20160322163242-5b898ac5add1 h1:EKhdznlJHPMoKr0XTrX+IlJs1LH3lyx2nfr1dOlZ79k= -github.com/whyrusleeping/go-keyspace v0.0.0-20160322163242-5b898ac5add1/go.mod h1:8UvriyWtv5Q5EOgjHaSseUEdkQfvwFv1I/In/O2M9gc= -github.com/wlynxg/anet v0.0.5 h1:J3VJGi1gvo0JwZ/P1/Yc/8p63SoW98B5dHkYDmpgvvU= -github.com/wlynxg/anet v0.0.5/go.mod h1:eay5PRQr7fIVAMbTbchTnO9gG65Hg/uYGdc7mguHxoA= -github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= -github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= -go.opencensus.io v0.24.0 h1:y73uSU6J157QMP2kn2r30vwW1A2W2WFwSCGnAVxeaD0= -go.opencensus.io v0.24.0/go.mod h1:vNK8G9p7aAivkbmorf4v+7Hgx+Zs0yY+0fOtgBfjQKo= -go.opentelemetry.io/auto/sdk v1.1.0 h1:cH53jehLUN6UFLY71z+NDOiNJqDdPRaXzTel0sJySYA= -go.opentelemetry.io/auto/sdk v1.1.0/go.mod h1:3wSPjt5PWp2RhlCcmmOial7AvC4DQqZb7a7wCow3W8A= -go.opentelemetry.io/otel v1.38.0 h1:RkfdswUDRimDg0m2Az18RKOsnI8UDzppJAtj01/Ymk8= -go.opentelemetry.io/otel v1.38.0/go.mod h1:zcmtmQ1+YmQM9wrNsTGV/q/uyusom3P8RxwExxkZhjM= -go.opentelemetry.io/otel/metric v1.38.0 h1:Kl6lzIYGAh5M159u9NgiRkmoMKjvbsKtYRwgfrA6WpA= -go.opentelemetry.io/otel/metric v1.38.0/go.mod h1:kB5n/QoRM8YwmUahxvI3bO34eVtQf2i4utNVLr9gEmI= -go.opentelemetry.io/otel/trace v1.38.0 h1:Fxk5bKrDZJUH+AMyyIXGcFAPah0oRcT+LuNtJrmcNLE= -go.opentelemetry.io/otel/trace v1.38.0/go.mod h1:j1P9ivuFsTceSWe1oY+EeW3sc+Pp42sO++GHkg4wwhs= -go.uber.org/dig v1.19.0 h1:BACLhebsYdpQ7IROQ1AGPjrXcP5dF80U3gKoFzbaq/4= -go.uber.org/dig v1.19.0/go.mod h1:Us0rSJiThwCv2GteUN0Q7OKvU7n5J4dxZ9JKUXozFdE= -go.uber.org/fx v1.24.0 h1:wE8mruvpg2kiiL1Vqd0CC+tr0/24XIB10Iwp2lLWzkg= -go.uber.org/fx v1.24.0/go.mod h1:AmDeGyS+ZARGKM4tlH4FY2Jr63VjbEDJHtqXTGP5hbo= -go.uber.org/goleak v1.3.0 h1:2K3zAYmnTNqV73imy9J1T3WC+gmCePx2hEGkimedGto= -go.uber.org/goleak v1.3.0/go.mod h1:CoHD4mav9JJNrW/WLlf7HGZPjdw8EucARQHekz1X6bE= -go.uber.org/mock v0.5.2 h1:LbtPTcP8A5k9WPXj54PPPbjcI4Y6lhyOZXn+VS7wNko= -go.uber.org/mock v0.5.2/go.mod h1:wLlUxC2vVTPTaE3UD51E0BGOAElKrILxhVSDYQLld5o= -go.uber.org/multierr v1.11.0 h1:blXXJkSxSSfBVBlC76pxqeO+LN3aDfLQo+309xJstO0= -go.uber.org/multierr v1.11.0/go.mod h1:20+QtiLqy0Nd6FdQB9TLXag12DsQkrbs3htMFfDN80Y= -go.uber.org/zap v1.27.0 h1:aJMhYGrd5QSmlpLMr2MftRKl7t8J8PTZPA732ud/XR8= -go.uber.org/zap v1.27.0/go.mod h1:GB2qFLM7cTU87MWRP2mPIjqfIDnGu+VIO4V/SdhGo2E= -go.yaml.in/yaml/v2 v2.4.2 h1:DzmwEr2rDGHl7lsFgAHxmNz/1NlQ7xLIrlN2h5d1eGI= -go.yaml.in/yaml/v2 v2.4.2/go.mod h1:081UH+NErpNdqlCXm3TtEran0rJZGxAYx9hb/ELlsPU= -go.yaml.in/yaml/v3 v3.0.4 h1:tfq32ie2Jv2UxXFdLJdh3jXuOzWiL1fo0bu/FbuKpbc= -go.yaml.in/yaml/v3 v3.0.4/go.mod h1:DhzuOOF2ATzADvBadXxruRBLzYTpT36CKvDb3+aBEFg= -golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= -golang.org/x/crypto v0.0.0-20190611184440-5c40567a22f8/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= -golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= -golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= -golang.org/x/crypto v0.42.0 h1:chiH31gIWm57EkTXpwnqf8qeuMUi0yekh6mT2AvFlqI= -golang.org/x/crypto v0.42.0/go.mod h1:4+rDnOTJhQCx2q7/j6rAN5XDw8kPjeaXEUR2eL94ix8= -golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= -golang.org/x/exp v0.0.0-20250811191247-51f88131bc50 h1:3yiSh9fhy5/RhCSntf4Sy0Tnx50DmMpQ4MQdKKk4yg4= -golang.org/x/exp v0.0.0-20250811191247-51f88131bc50/go.mod h1:rT6SFzZ7oxADUDx58pcaKFTcZ+inxAa9fTrYx/uVYwg= -golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= -golang.org/x/lint v0.0.0-20190227174305-5b3e6a55c961/go.mod h1:wehouNa3lNwaWXcvxsM5YxQ5yQlVC4a0KAMCusXpPoU= -golang.org/x/lint v0.0.0-20190313153728-d0100b6bd8b3/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= -golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= -golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= -golang.org/x/mod v0.27.0 h1:kb+q2PyFnEADO2IEF935ehFUXlWiNjJWtRNgBLSfbxQ= -golang.org/x/mod v0.27.0/go.mod h1:rWI627Fq0DEoudcK+MBkNkCe0EetEaDSwJJkCcjpazc= -golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= -golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= -golang.org/x/net v0.0.0-20190213061140-3a22650c66bd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= -golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= -golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= -golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= -golang.org/x/net v0.0.0-20200226121028-0de0cce0169b/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= -golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= -golang.org/x/net v0.0.0-20201110031124-69a78807bb2b/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= -golang.org/x/net v0.44.0 h1:evd8IRDyfNBMBTTY5XRF1vaZlD+EmWx6x8PkhR04H/I= -golang.org/x/net v0.44.0/go.mod h1:ECOoLqd5U3Lhyeyo/QDCEVQ4sNgYsqvCZ722XogGieY= -golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= -golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.17.0 h1:l60nONMj9l5drqw6jlhIELNv9I0A4OFgRsG9k2oT9Ug= -golang.org/x/sync v0.17.0/go.mod h1:9KTHXmSnoGruLpwFjVSX0lNNA75CykiMECbovNTZqGI= -golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= -golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= -golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20220811171246-fbc7d0a398ab/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.6.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.12.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.36.0 h1:KVRy2GtZBrk1cBYA7MKu5bEZFxQk4NIDV6RLVcC8o0k= -golang.org/x/sys v0.36.0/go.mod h1:OgkHotnGiDImocRcuBABYBEXf8A9a87e/uXjp9XT3ks= -golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= -golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= -golang.org/x/text v0.29.0 h1:1neNs90w9YzJ9BocxfsQNHKuAT4pkghyXc4nhZ6sJvk= -golang.org/x/text v0.29.0/go.mod h1:7MhJOA9CD2qZyOKYazxdYMF85OwPdEr9jTtBpO7ydH4= -golang.org/x/time v0.12.0 h1:ScB/8o8olJvc+CQPWrK3fPZNfh7qgwCrY0zJmoEQLSE= -golang.org/x/time v0.12.0/go.mod h1:CDIdPxbZBQxdj6cxyCIdrNogrJKMJ7pr37NYpMcMDSg= -golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= -golang.org/x/tools v0.0.0-20190114222345-bf090417da8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= -golang.org/x/tools v0.0.0-20190226205152-f727befe758c/go.mod h1:9Yl7xja0Znq3iFh3HoIrodX9oNMXvdceNzlUR8zjMvY= -golang.org/x/tools v0.0.0-20190311212946-11955173bddd/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= -golang.org/x/tools v0.0.0-20190524140312-2c0ae7006135/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= -golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= -golang.org/x/tools v0.0.0-20200619180055-7c47624df98f/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= -golang.org/x/tools v0.0.0-20210106214847-113979e3529a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= -golang.org/x/tools v0.36.0 h1:kWS0uv/zsvHEle1LbV5LE8QujrxB3wfQyxHfhOk0Qkg= -golang.org/x/tools v0.36.0/go.mod h1:WBDiHKJK8YgLHlcQPYQzNCkUxUypCaa5ZegCVutKm+s= -golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= -golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= -golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= -golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= -golang.org/x/xerrors v0.0.0-20240903120638-7835f813f4da h1:noIWHXmPHxILtqtCOPIhSt0ABwskkZKjD3bXGnZGpNY= -golang.org/x/xerrors v0.0.0-20240903120638-7835f813f4da/go.mod h1:NDW/Ps6MPRej6fsCIbMTohpP40sJ/P/vI1MoTEGwX90= -gonum.org/v1/gonum v0.16.0 h1:5+ul4Swaf3ESvrOnidPp4GZbzf0mxVQpDCYUQE7OJfk= -gonum.org/v1/gonum v0.16.0/go.mod h1:fef3am4MQ93R2HHpKnLk4/Tbh/s0+wqD5nfa6Pnwy4E= -google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM= -google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= -google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc= -google.golang.org/genproto v0.0.0-20190819201941-24fa4b261c55/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc= -google.golang.org/genproto v0.0.0-20200526211855-cb27e3aa2013/go.mod h1:NbSheEEYHJ7i3ixzK3sjbqSGDJWnxyFXZblF3eUsNvo= -google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c= -google.golang.org/grpc v1.23.0/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg= -google.golang.org/grpc v1.25.1/go.mod h1:c3i+UQWmh7LiEpx4sFZnkU36qjEYZ0imhYfXVyQciAY= -google.golang.org/grpc v1.27.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk= -google.golang.org/grpc v1.33.2/go.mod h1:JMHMWHQWaTccqQQlmk3MJZS+GWXOdAesneDmEnv2fbc= -google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8= -google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0= -google.golang.org/protobuf v0.0.0-20200228230310-ab0ca4ff8a60/go.mod h1:cfTl7dwQJ+fmap5saPgwCLgHXTUD7jkjRqWcaiX5VyM= -google.golang.org/protobuf v1.20.1-0.20200309200217-e05f789c0967/go.mod h1:A+miEFZTKqfCUM6K7xSMQL9OKL/b6hQv+e19PK+JZNE= -google.golang.org/protobuf v1.21.0/go.mod h1:47Nbq4nVaFHyn7ilMalzfO3qCViNmqZ2kzikPIcrTAo= -google.golang.org/protobuf v1.22.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= -google.golang.org/protobuf v1.23.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= -google.golang.org/protobuf v1.23.1-0.20200526195155-81db48ad09cc/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= -google.golang.org/protobuf v1.25.0/go.mod h1:9JNX74DMeImyA3h4bdi1ymwjUzf21/xIlbajtzgsN7c= -google.golang.org/protobuf v1.36.9 h1:w2gp2mA27hUeUzj9Ex9FBjsBm40zfaDtEWow293U7Iw= -google.golang.org/protobuf v1.36.9/go.mod h1:fuxRtAxBytpl4zzqUh6/eyUujkJdNiuEkXntxiD/uRU= -gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= -gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= -gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA= -gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= -honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= -honnef.co/go/tools v0.0.0-20190523083050-ea95bdfd59fc/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= -lukechampine.com/blake3 v1.4.1 h1:I3Smz7gso8w4/TunLKec6K2fn+kyKtDxr/xcQEN84Wg= -lukechampine.com/blake3 v1.4.1/go.mod h1:QFosUxmjB8mnrWFSNwKmvxHpfY72bmD2tQ0kBMM3kwo= diff --git a/tools/da-debug/main.go b/tools/da-debug/main.go deleted file mode 100644 index 70c81edf64..0000000000 --- a/tools/da-debug/main.go +++ /dev/null @@ -1,557 +0,0 @@ -package main - -import ( - "context" - "encoding/hex" - "fmt" - "os" - "strconv" - "strings" - "time" - - "github.com/rs/zerolog" - "github.com/spf13/cobra" - "google.golang.org/protobuf/proto" - - coreda "github.com/evstack/ev-node/core/da" - "github.com/evstack/ev-node/da/jsonrpc" - "github.com/evstack/ev-node/types" - pb "github.com/evstack/ev-node/types/pb/evnode/v1" -) - -var ( - daURL string - authToken string - timeout time.Duration - verbose bool - maxBlobSize uint64 - filterHeight uint64 -) - -func main() { - rootCmd := &cobra.Command{ - Use: "da-debug", - Short: "DA debugging tool for blockchain data inspection", - Long: `DA Debug Tool -A powerful DA debugging tool for inspecting blockchain data availability layers.`, - } - - // Global flags - rootCmd.PersistentFlags().StringVar(&daURL, "da-url", "http://localhost:7980", "DA layer JSON-RPC URL") - rootCmd.PersistentFlags().StringVar(&authToken, "auth-token", "", "Authentication token for DA layer") - rootCmd.PersistentFlags().DurationVar(&timeout, "timeout", 30*time.Second, "Request timeout") - rootCmd.PersistentFlags().BoolVar(&verbose, "verbose", false, "Enable verbose logging") - rootCmd.PersistentFlags().Uint64Var(&maxBlobSize, "max-blob-size", 1970176, "Maximum blob size in bytes") - - // Add subcommands - rootCmd.AddCommand(queryCmd()) - rootCmd.AddCommand(searchCmd()) - - if err := rootCmd.Execute(); err != nil { - printError("Error: %v\n", err) - os.Exit(1) - } -} - -func queryCmd() *cobra.Command { - cmd := &cobra.Command{ - Use: "query ", - Short: "Query and decode blobs at a specific DA height and namespace", - Long: `Query and decode blobs at a specific DA height and namespace. -Decodes each blob as either header or data and displays detailed information.`, - Args: cobra.ExactArgs(2), - RunE: runQuery, - } - - cmd.Flags().Uint64Var(&filterHeight, "filter-height", 0, "Filter blobs by specific height (0 = no filter)") - - return cmd -} - -func searchCmd() *cobra.Command { - var searchHeight uint64 - var searchRange uint64 - - cmd := &cobra.Command{ - Use: "search --target-height ", - Short: "Search for blobs containing a specific blockchain height", - Long: `Search through multiple DA heights to find blobs containing data from a specific blockchain height. -Starting from the given DA height, searches through a range of DA heights until it finds matching blobs.`, - Args: cobra.ExactArgs(2), - RunE: func(cmd *cobra.Command, args []string) error { - return runSearch(cmd, args, searchHeight, searchRange) - }, - } - - cmd.Flags().Uint64Var(&searchHeight, "target-height", 0, "Target blockchain height to search for (required)") - cmd.Flags().Uint64Var(&searchRange, "range", 10, "Number of DA heights to search") - cmd.MarkFlagRequired("target-height") - - return cmd -} - -func runQuery(cmd *cobra.Command, args []string) error { - height, err := strconv.ParseUint(args[0], 10, 64) - if err != nil { - return fmt.Errorf("invalid height: %w", err) - } - - namespace, err := parseNamespace(args[1]) - if err != nil { - return fmt.Errorf("invalid namespace: %w", err) - } - - printBanner() - printQueryInfo(height, namespace) - - client, err := createDAClient() - if err != nil { - return err - } - defer client.Close() - - ctx, cancel := context.WithTimeout(context.Background(), timeout) - defer cancel() - - return queryHeight(ctx, client, height, namespace) -} - -func runSearch(cmd *cobra.Command, args []string, searchHeight, searchRange uint64) error { - startHeight, err := strconv.ParseUint(args[0], 10, 64) - if err != nil { - return fmt.Errorf("invalid start height: %w", err) - } - - namespace, err := parseNamespace(args[1]) - if err != nil { - return fmt.Errorf("invalid namespace: %w", err) - } - - printBanner() - printSearchInfo(startHeight, namespace, searchHeight, searchRange) - - client, err := createDAClient() - if err != nil { - return err - } - defer client.Close() - - ctx, cancel := context.WithTimeout(context.Background(), timeout) - defer cancel() - - return searchForHeight(ctx, client, startHeight, namespace, searchHeight, searchRange) -} - -func searchForHeight(ctx context.Context, client *jsonrpc.Client, startHeight uint64, namespace []byte, targetHeight, searchRange uint64) error { - fmt.Printf("Searching for height %d in DA heights %d-%d...\n", targetHeight, startHeight, startHeight+searchRange-1) - fmt.Println() - - foundBlobs := 0 - for daHeight := startHeight; daHeight < startHeight+searchRange; daHeight++ { - result, err := client.DA.GetIDs(ctx, daHeight, namespace) - if err != nil { - if err.Error() == "blob: not found" || strings.Contains(err.Error(), "blob: not found") { - continue - } - if strings.Contains(err.Error(), "height") && strings.Contains(err.Error(), "future") { - fmt.Printf("Reached future height at DA height %d\n", daHeight) - break - } - continue - } - - if result == nil || len(result.IDs) == 0 { - continue - } - - // Get the actual blob data - blobs, err := client.DA.Get(ctx, result.IDs, namespace) - if err != nil { - continue - } - - // Check each blob for the target height - for i, blob := range blobs { - found := false - var blobHeight uint64 - - // Try to decode as header first - if header := tryDecodeHeader(blob); header != nil { - blobHeight = header.Height() - if blobHeight == targetHeight { - found = true - } - } else if data := tryDecodeData(blob); data != nil { - if data.Metadata != nil { - blobHeight = data.Height() - if blobHeight == targetHeight { - found = true - } - } - } - - if found { - foundBlobs++ - fmt.Printf("FOUND at DA Height %d - BLOB %d\n", daHeight, foundBlobs) - fmt.Println(strings.Repeat("-", 80)) - displayBlobInfo(result.IDs[i], blob) - - // Display the decoded content - if header := tryDecodeHeader(blob); header != nil { - printTypeHeader("SignedHeader", "") - displayHeader(header) - } else if data := tryDecodeData(blob); data != nil { - printTypeHeader("SignedData", "") - displayData(data) - } - - fmt.Println() - } - } - } - - fmt.Println(strings.Repeat("=", 50)) - if foundBlobs == 0 { - fmt.Printf("No blobs found containing height %d in DA range %d-%d\n", targetHeight, startHeight, startHeight+searchRange-1) - } else { - fmt.Printf("Found %d blob(s) containing height %d\n", foundBlobs, targetHeight) - } - - return nil -} - -func queryHeight(ctx context.Context, client *jsonrpc.Client, height uint64, namespace []byte) error { - result, err := client.DA.GetIDs(ctx, height, namespace) - if err != nil { - // Handle "blob not found" as a normal case - if err.Error() == "blob: not found" || strings.Contains(err.Error(), "blob: not found") { - fmt.Printf("No blobs found at height %d\n", height) - return nil - } - // Handle future height errors gracefully - if strings.Contains(err.Error(), "height") && strings.Contains(err.Error(), "future") { - fmt.Printf("Height %d is in the future (not yet available)\n", height) - return nil - } - return fmt.Errorf("failed to get IDs: %w", err) - } - - if result == nil || len(result.IDs) == 0 { - fmt.Printf("No blobs found at height %d\n", height) - return nil - } - - fmt.Printf("Found %d blob(s) at height %d\n", len(result.IDs), height) - fmt.Printf("Timestamp: %s\n", result.Timestamp.Format(time.RFC3339)) - fmt.Println() - - // Get the actual blob data - blobs, err := client.DA.Get(ctx, result.IDs, namespace) - if err != nil { - return fmt.Errorf("failed to get blob data: %w", err) - } - - // Process each blob with optional height filtering - displayedBlobs := 0 - for i, blob := range blobs { - shouldDisplay := true - var blobHeight uint64 - - // Check if we need to filter by height - if filterHeight > 0 { - shouldDisplay = false - - // Try to decode as header first to check height - if header := tryDecodeHeader(blob); header != nil { - blobHeight = header.Height() - if blobHeight == filterHeight { - shouldDisplay = true - } - } else if data := tryDecodeData(blob); data != nil { - if data.Metadata != nil { - blobHeight = data.Height() - if blobHeight == filterHeight { - shouldDisplay = true - } - } - } - } - - if !shouldDisplay { - continue - } - - displayedBlobs++ - printBlobHeader(displayedBlobs, -1) // -1 indicates filtered mode - displayBlobInfo(result.IDs[i], blob) - - // Try to decode as header first - if header := tryDecodeHeader(blob); header != nil { - printTypeHeader("SignedHeader", "") - displayHeader(header) - } else if data := tryDecodeData(blob); data != nil { - printTypeHeader("SignedData", "") - displayData(data) - } else { - printTypeHeader("Raw Data", "") - displayRawData(blob) - } - - if displayedBlobs > 1 { - printSeparator() - } - } - - // Show filter results - if filterHeight > 0 { - if displayedBlobs == 0 { - fmt.Printf("No blobs found matching height filter: %d\n", filterHeight) - } else { - fmt.Printf("Showing %d blob(s) matching height filter: %d\n", displayedBlobs, filterHeight) - } - } - - printFooter() - return nil -} - -func printBanner() { - fmt.Println("DA Debug Tool - Blockchain Data Inspector") - fmt.Println(strings.Repeat("=", 50)) -} - -func printQueryInfo(height uint64, namespace []byte) { - fmt.Printf("DA Height: %d | Namespace: %s | URL: %s", height, formatHash(hex.EncodeToString(namespace)), daURL) - if filterHeight > 0 { - fmt.Printf(" | Filter Height: %d", filterHeight) - } - fmt.Println() - fmt.Println() -} - -func printSearchInfo(startHeight uint64, namespace []byte, targetHeight, searchRange uint64) { - fmt.Printf("Start DA Height: %d | Namespace: %s | URL: %s", startHeight, formatHash(hex.EncodeToString(namespace)), daURL) - fmt.Printf(" | Target Height: %d | Range: %d", targetHeight, searchRange) - fmt.Println() - fmt.Println() -} - -func printBlobHeader(current, total int) { - if total == -1 { - fmt.Printf("BLOB %d\n", current) - } else { - fmt.Printf("BLOB %d/%d\n", current, total) - } - fmt.Println(strings.Repeat("-", 80)) -} - -func displayBlobInfo(id coreda.ID, blob []byte) { - fmt.Printf("ID: %s\n", formatHash(hex.EncodeToString(id))) - fmt.Printf("Size: %s\n", formatSize(len(blob))) - - // Try to parse the ID to show height and commitment - if idHeight, commitment, err := coreda.SplitID(id); err == nil { - fmt.Printf("ID Height: %d\n", idHeight) - fmt.Printf("Commitment: %s\n", formatHash(hex.EncodeToString(commitment))) - } -} - -func printTypeHeader(title, color string) { - fmt.Printf("Type: %s\n", title) -} - -func displayHeader(header *types.SignedHeader) { - fmt.Printf("Height: %d\n", header.Height()) - fmt.Printf("Time: %s\n", header.Time().Format(time.RFC3339)) - fmt.Printf("Chain ID: %s\n", header.ChainID()) - fmt.Printf("Version: Block=%d, App=%d\n", header.Version.Block, header.Version.App) - fmt.Printf("Last Header: %s\n", formatHashField(hex.EncodeToString(header.LastHeaderHash[:]))) - fmt.Printf("Data Hash: %s\n", formatHashField(hex.EncodeToString(header.DataHash[:]))) - fmt.Printf("Validator: %s\n", formatHashField(hex.EncodeToString(header.ValidatorHash[:]))) - fmt.Printf("Proposer: %s\n", formatHashField(hex.EncodeToString(header.ProposerAddress))) - fmt.Printf("Signature: %s\n", formatHashField(hex.EncodeToString(header.Signature))) - if len(header.Signer.Address) > 0 { - fmt.Printf("Signer: %s\n", formatHashField(hex.EncodeToString(header.Signer.Address))) - } -} - -func displayData(data *types.SignedData) { - if data.Metadata != nil { - fmt.Printf("Chain ID: %s\n", data.ChainID()) - fmt.Printf("Height: %d\n", data.Height()) - fmt.Printf("Time: %s\n", data.Time().Format(time.RFC3339)) - fmt.Printf("Last Data: %s\n", formatHashField(hex.EncodeToString(data.LastDataHash[:]))) - } - - dataHash := data.DACommitment() - fmt.Printf("DA Commit: %s\n", formatHashField(hex.EncodeToString(dataHash[:]))) - fmt.Printf("TX Count: %d\n", len(data.Txs)) - fmt.Printf("Signature: %s\n", formatHashField(hex.EncodeToString(data.Signature))) - - if len(data.Signer.Address) > 0 { - fmt.Printf("Signer: %s\n", formatHashField(hex.EncodeToString(data.Signer.Address))) - } - - // Display transactions - if len(data.Txs) > 0 { - fmt.Printf("\nTransactions:\n") - for i, tx := range data.Txs { - fmt.Printf(" [%d] Size: %s, Hash: %s\n", - i+1, - formatSize(len(tx)), - formatShortHash(hex.EncodeToString(tx))) - - if isPrintable(tx) && len(tx) < 200 { - preview := string(tx) - if len(preview) > 60 { - preview = preview[:60] + "..." - } - fmt.Printf(" Data: %s\n", preview) - } - } - } -} - -func displayRawData(blob []byte) { - hexStr := hex.EncodeToString(blob) - if len(hexStr) > 120 { - fmt.Printf("Hex: %s...\n", hexStr[:120]) - fmt.Printf("Full Length: %s\n", formatSize(len(blob))) - } else { - fmt.Printf("Hex: %s\n", hexStr) - } - - if isPrintable(blob) { - strData := string(blob) - if len(strData) > 200 { - fmt.Printf("String: %s...\n", strData[:200]) - } else { - fmt.Printf("String: %s\n", strData) - } - } else { - fmt.Printf("String: (Binary data - not printable)\n") - } -} - -// Helper functions for formatting - -func formatHash(hash string) string { - return hash -} - -func formatHashField(hash string) string { - return hash -} - -func formatShortHash(hash string) string { - return hash -} - -func formatSize(bytes int) string { - if bytes < 1024 { - return fmt.Sprintf("%d B", bytes) - } else if bytes < 1024*1024 { - return fmt.Sprintf("%.1f KB", float64(bytes)/1024) - } else { - return fmt.Sprintf("%.1f MB", float64(bytes)/(1024*1024)) - } -} - -func printSeparator() { - fmt.Println() -} - -func printFooter() { - fmt.Println(strings.Repeat("=", 50)) - fmt.Printf("Analysis complete!\n") -} - -func printError(format string, args ...interface{}) { - fmt.Fprintf(os.Stderr, "Error: "+format, args...) -} - -func tryDecodeHeader(bz []byte) *types.SignedHeader { - header := new(types.SignedHeader) - var headerPb pb.SignedHeader - - if err := proto.Unmarshal(bz, &headerPb); err != nil { - return nil - } - - if err := header.FromProto(&headerPb); err != nil { - return nil - } - - // Basic validation - if err := header.Header.ValidateBasic(); err != nil { - return nil - } - - return header -} - -func tryDecodeData(bz []byte) *types.SignedData { - var signedData types.SignedData - if err := signedData.UnmarshalBinary(bz); err != nil { - return nil - } - - // Skip completely empty data - if len(signedData.Txs) == 0 && len(signedData.Signature) == 0 { - return nil - } - - return &signedData -} - -func createDAClient() (*jsonrpc.Client, error) { - logger := zerolog.New(zerolog.ConsoleWriter{Out: os.Stderr}).Level(zerolog.InfoLevel) - if verbose { - logger = logger.Level(zerolog.DebugLevel) - } - - ctx, cancel := context.WithTimeout(context.Background(), timeout) - defer cancel() - - client, err := jsonrpc.NewClient(ctx, logger, daURL, authToken, maxBlobSize) - if err != nil { - return nil, fmt.Errorf("failed to create DA client: %w", err) - } - - return client, nil -} - -func parseNamespace(ns string) ([]byte, error) { - // Try to parse as hex first - if hex, err := parseHex(ns); err == nil && len(hex) == 29 { - return hex, nil - } - - // If not valid hex or not 29 bytes, treat as string identifier - namespace := coreda.NamespaceFromString(ns) - return namespace.Bytes(), nil -} - -func parseHex(s string) ([]byte, error) { - // Remove 0x prefix if present - if len(s) >= 2 && s[:2] == "0x" { - s = s[2:] - } - - return hex.DecodeString(s) -} - -func isPrintable(data []byte) bool { - if len(data) > 1000 { // Only check first 1000 bytes for performance - data = data[:1000] - } - - for _, b := range data { - if b < 32 || b > 126 { - if b != '\n' && b != '\r' && b != '\t' { - return false - } - } - } - return true -} diff --git a/tools/da-debug/main_test.go b/tools/da-debug/main_test.go deleted file mode 100644 index 09818b4bb5..0000000000 --- a/tools/da-debug/main_test.go +++ /dev/null @@ -1,216 +0,0 @@ -package main - -import ( - "encoding/hex" - "testing" - - coreda "github.com/evstack/ev-node/core/da" -) - -func TestParseNamespace(t *testing.T) { - tests := []struct { - name string - input string - expected int // expected length in bytes - wantErr bool - }{ - { - name: "valid hex namespace with 0x prefix", - input: "0x000000000000000000000000000000000000000000000000000000746573743031", - expected: 29, - wantErr: false, - }, - { - name: "valid hex namespace without prefix", - input: "000000000000000000000000000000000000000000000000000000746573743031", - expected: 29, - wantErr: false, - }, - { - name: "string identifier", - input: "test-namespace", - expected: 29, - wantErr: false, - }, - { - name: "empty string", - input: "", - expected: 29, - wantErr: false, - }, - } - - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - result, err := parseNamespace(tt.input) - - if tt.wantErr && err == nil { - t.Errorf("parseNamespace() expected error, got nil") - } - if !tt.wantErr && err != nil { - t.Errorf("parseNamespace() unexpected error: %v", err) - } - - if len(result) != tt.expected { - t.Errorf("parseNamespace() result length = %d, expected %d", len(result), tt.expected) - } - }) - } -} - -func TestTryDecodeHeader(t *testing.T) { - // Test with invalid data - result := tryDecodeHeader([]byte("invalid")) - if result != nil { - t.Errorf("tryDecodeHeader() with invalid data should return nil") - } - - // Test with empty data - result = tryDecodeHeader([]byte{}) - if result != nil { - t.Errorf("tryDecodeHeader() with empty data should return nil") - } -} - -func TestTryDecodeData(t *testing.T) { - // Test with invalid data - result := tryDecodeData([]byte("invalid")) - if result != nil { - t.Errorf("tryDecodeData() with invalid data should return nil") - } - - // Test with empty data - result = tryDecodeData([]byte{}) - if result != nil { - t.Errorf("tryDecodeData() with empty data should return nil") - } -} - -func TestParseHex(t *testing.T) { - tests := []struct { - name string - input string - expected string - wantErr bool - }{ - { - name: "with 0x prefix", - input: "0xdeadbeef", - expected: "deadbeef", - wantErr: false, - }, - { - name: "without prefix", - input: "deadbeef", - expected: "deadbeef", - wantErr: false, - }, - { - name: "invalid hex", - input: "xyz123", - wantErr: true, - }, - { - name: "empty string", - input: "", - expected: "", - wantErr: false, - }, - } - - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - result, err := parseHex(tt.input) - - if tt.wantErr && err == nil { - t.Errorf("parseHex() expected error, got nil") - } - if !tt.wantErr && err != nil { - t.Errorf("parseHex() unexpected error: %v", err) - } - - if !tt.wantErr { - resultHex := hex.EncodeToString(result) - if resultHex != tt.expected { - t.Errorf("parseHex() result = %s, expected %s", resultHex, tt.expected) - } - } - }) - } -} - -func TestIsPrintable(t *testing.T) { - tests := []struct { - name string - input []byte - expected bool - }{ - { - name: "printable ASCII", - input: []byte("Hello, World!"), - expected: true, - }, - { - name: "with newlines and tabs", - input: []byte("Hello\nWorld\t!"), - expected: true, - }, - { - name: "binary data", - input: []byte{0x00, 0x01, 0x02, 0xFF}, - expected: false, - }, - { - name: "mixed printable and non-printable", - input: []byte("Hello\x00World"), - expected: false, - }, - { - name: "empty", - input: []byte{}, - expected: true, - }, - } - - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - result := isPrintable(tt.input) - if result != tt.expected { - t.Errorf("isPrintable() = %v, expected %v", result, tt.expected) - } - }) - } -} - -func TestIDSplitting(t *testing.T) { - // Test with a mock ID that follows the expected format - height := uint64(12345) - commitment := []byte("test-commitment-data") - - // Create an ID using the format from the LocalDA implementation - id := make([]byte, 8+len(commitment)) - // Use little endian as per the da.go implementation - id[0] = byte(height) - id[1] = byte(height >> 8) - id[2] = byte(height >> 16) - id[3] = byte(height >> 24) - id[4] = byte(height >> 32) - id[5] = byte(height >> 40) - id[6] = byte(height >> 48) - id[7] = byte(height >> 56) - copy(id[8:], commitment) - - // Test splitting - parsedHeight, parsedCommitment, err := coreda.SplitID(id) - if err != nil { - t.Errorf("SplitID() unexpected error: %v", err) - } - - if parsedHeight != height { - t.Errorf("SplitID() height = %d, expected %d", parsedHeight, height) - } - - if string(parsedCommitment) != string(commitment) { - t.Errorf("SplitID() commitment = %s, expected %s", string(parsedCommitment), string(commitment)) - } -} diff --git a/tools/tools.mk b/tools/tools.mk index 2eefeec775..0c18b1a716 100644 --- a/tools/tools.mk +++ b/tools/tools.mk @@ -1,7 +1,7 @@ # tools.mk - Build configuration for ev-node tools # Tool names -TOOLS := da-debug blob-decoder cache-analyzer +TOOLS := blob-decoder cache-analyzer # Build directory TOOLS_BUILD_DIR := $(CURDIR)/build @@ -14,14 +14,6 @@ LDFLAGS ?= \ -X main.GitSHA=$(GITSHA) # Individual tool build targets -## build-tool-da-debug: Build da-debug tool -build-tool-da-debug: - @echo "--> Building da-debug tool" - @mkdir -p $(TOOLS_BUILD_DIR) - @cd tools/da-debug && go build -ldflags "$(LDFLAGS)" -o $(TOOLS_BUILD_DIR)/da-debug . - @echo "--> da-debug built: $(TOOLS_BUILD_DIR)/da-debug" -.PHONY: build-tool-da-debug - ## build-tool-blob-decoder: Build blob-decoder tool build-tool-blob-decoder: @echo "--> Building blob-decoder tool" @@ -45,13 +37,6 @@ build-tools: $(addprefix build-tool-, $(TOOLS)) .PHONY: build-tools # Install individual tools -## install-tool-da-debug: Install da-debug tool to Go bin -install-tool-da-debug: - @echo "--> Installing da-debug tool" - @cd tools/da-debug && go install -ldflags "$(LDFLAGS)" . - @echo "--> da-debug installed to Go bin" -.PHONY: install-tool-da-debug - ## install-tool-blob-decoder: Install blob-decoder tool to Go bin install-tool-blob-decoder: @echo "--> Installing blob-decoder tool" From 8fbeed390d6828cba8550b2baab0d72cf6300ac8 Mon Sep 17 00:00:00 2001 From: Randy Grok Date: Mon, 24 Nov 2025 12:43:15 +0100 Subject: [PATCH 08/35] refactor: consolidate DA interface into single file Move all DA-related code from core/da module into a single da/da.go file. The DA interface is now co-located with its implementation (Celestia adapter) rather than being a separate core module. Changes: - Consolidate core/da/{da.go,errors.go,namespace.go} into da/da.go - Move DummyDA test utility to da/testing.go - Update all imports from github.com/evstack/ev-node/core/da to github.com/evstack/ev-node/da - Remove core/da directory entirely - Fix package shadowing issues in block/internal/{submitting,syncing} The DA interface is kept for testability with mocks, but no longer needs to be a separate module since Celestia is the only implementation. --- apps/evm/single/cmd/run.go | 2 +- apps/grpc/single/cmd/run.go | 2 +- apps/testapp/cmd/run.go | 2 +- block/components.go | 6 +- block/components_test.go | 8 +- block/internal/submitting/da_submitter.go | 26 +- .../da_submitter_integration_test.go | 4 +- .../submitting/da_submitter_mocks_test.go | 28 +-- .../internal/submitting/da_submitter_test.go | 8 +- block/internal/syncing/da_retriever.go | 48 ++-- block/internal/syncing/da_retriever_test.go | 26 +- block/internal/syncing/syncer.go | 10 +- block/internal/syncing/syncer_backoff_test.go | 16 +- block/internal/syncing/syncer_test.go | 4 +- core/da/da.go | 126 ---------- core/da/errors.go | 16 -- core/da/namespace.go | 129 ---------- da/celestia/adapter.go | 2 +- da/celestia/adapter_test.go | 2 +- da/cmd/local-da/local.go | 40 +-- da/cmd/local-da/server.go | 2 +- da/da.go | 232 ++++++++++++++++++ core/da/namespace_test.go => da/da_test.go | 0 da/internal/mocks/da.go | 2 +- core/da/dummy.go => da/testing.go | 0 core/da/dummy_test.go => da/testing_test.go | 0 node/full.go | 6 +- node/helpers_test.go | 10 +- node/node.go | 4 +- node/single_sequencer_integration_test.go | 4 +- pkg/cmd/run_node.go | 4 +- pkg/cmd/run_node_test.go | 8 +- pkg/config/config.go | 2 +- pkg/rpc/server/da_visualization.go | 34 +-- pkg/rpc/server/da_visualization_test.go | 46 ++-- pkg/rpc/server/server.go | 6 +- sequencers/single/sequencer.go | 10 +- sequencers/single/sequencer_test.go | 26 +- test/mocks/da.go | 2 +- types/da.go | 110 ++++----- types/da_test.go | 90 +++---- 41 files changed, 532 insertions(+), 571 deletions(-) delete mode 100644 core/da/da.go delete mode 100644 core/da/errors.go delete mode 100644 core/da/namespace.go create mode 100644 da/da.go rename core/da/namespace_test.go => da/da_test.go (100%) rename core/da/dummy.go => da/testing.go (100%) rename core/da/dummy_test.go => da/testing_test.go (100%) diff --git a/apps/evm/single/cmd/run.go b/apps/evm/single/cmd/run.go index 77a0d34533..0c2f2be85d 100644 --- a/apps/evm/single/cmd/run.go +++ b/apps/evm/single/cmd/run.go @@ -7,7 +7,7 @@ import ( "os" "path/filepath" - "github.com/evstack/ev-node/core/da" + "github.com/evstack/ev-node/da" celestiada "github.com/evstack/ev-node/da/celestia" "github.com/evstack/ev-node/node" "github.com/evstack/ev-node/sequencers/single" diff --git a/apps/grpc/single/cmd/run.go b/apps/grpc/single/cmd/run.go index 5eb9d8c1e8..7953382f36 100644 --- a/apps/grpc/single/cmd/run.go +++ b/apps/grpc/single/cmd/run.go @@ -6,7 +6,7 @@ import ( "github.com/spf13/cobra" - "github.com/evstack/ev-node/core/da" + "github.com/evstack/ev-node/da" "github.com/evstack/ev-node/core/execution" celestiada "github.com/evstack/ev-node/da/celestia" executiongrpc "github.com/evstack/ev-node/execution/grpc" diff --git a/apps/testapp/cmd/run.go b/apps/testapp/cmd/run.go index 182ed381a2..f51cab76b1 100644 --- a/apps/testapp/cmd/run.go +++ b/apps/testapp/cmd/run.go @@ -8,7 +8,7 @@ import ( "github.com/spf13/cobra" kvexecutor "github.com/evstack/ev-node/apps/testapp/kv" - "github.com/evstack/ev-node/core/da" + "github.com/evstack/ev-node/da" celestiada "github.com/evstack/ev-node/da/celestia" "github.com/evstack/ev-node/node" rollcmd "github.com/evstack/ev-node/pkg/cmd" diff --git a/block/components.go b/block/components.go index 4b493b5a4f..3c0b913023 100644 --- a/block/components.go +++ b/block/components.go @@ -13,7 +13,7 @@ import ( "github.com/evstack/ev-node/block/internal/reaping" "github.com/evstack/ev-node/block/internal/submitting" "github.com/evstack/ev-node/block/internal/syncing" - coreda "github.com/evstack/ev-node/core/da" + da "github.com/evstack/ev-node/da" coreexecutor "github.com/evstack/ev-node/core/execution" coresequencer "github.com/evstack/ev-node/core/sequencer" "github.com/evstack/ev-node/pkg/config" @@ -131,7 +131,7 @@ func NewSyncComponents( genesis genesis.Genesis, store store.Store, exec coreexecutor.Executor, - da coreda.DA, + da da.DA, headerStore common.Broadcaster[*types.SignedHeader], dataStore common.Broadcaster[*types.Data], logger zerolog.Logger, @@ -194,7 +194,7 @@ func NewAggregatorComponents( store store.Store, exec coreexecutor.Executor, sequencer coresequencer.Sequencer, - da coreda.DA, + da da.DA, signer signer.Signer, headerBroadcaster common.Broadcaster[*types.SignedHeader], dataBroadcaster common.Broadcaster[*types.Data], diff --git a/block/components_test.go b/block/components_test.go index eadf45328c..3fa9cae8f5 100644 --- a/block/components_test.go +++ b/block/components_test.go @@ -15,7 +15,7 @@ import ( "github.com/stretchr/testify/mock" "github.com/stretchr/testify/require" - coreda "github.com/evstack/ev-node/core/da" + da "github.com/evstack/ev-node/da" coresequencer "github.com/evstack/ev-node/core/sequencer" "github.com/evstack/ev-node/pkg/config" "github.com/evstack/ev-node/pkg/genesis" @@ -92,7 +92,7 @@ func TestNewSyncComponents_Creation(t *testing.T) { } mockExec := testmocks.NewMockExecutor(t) - dummyDA := coreda.NewDummyDA(10_000_000, 10*time.Millisecond) + dummyDA := da.NewDummyDA(10_000_000, 10*time.Millisecond) // Just test that the constructor doesn't panic - don't start the components // to avoid P2P store dependencies @@ -143,7 +143,7 @@ func TestNewAggregatorComponents_Creation(t *testing.T) { mockExec := testmocks.NewMockExecutor(t) mockSeq := testmocks.NewMockSequencer(t) - dummyDA := coreda.NewDummyDA(10_000_000, 10*time.Millisecond) + dummyDA := da.NewDummyDA(10_000_000, 10*time.Millisecond) components, err := NewAggregatorComponents( cfg, @@ -197,7 +197,7 @@ func TestExecutor_RealExecutionClientFailure_StopsNode(t *testing.T) { // Create mock executor that will fail on ExecuteTxs mockExec := testmocks.NewMockExecutor(t) mockSeq := testmocks.NewMockSequencer(t) - dummyDA := coreda.NewDummyDA(10_000_000, 10*time.Millisecond) + dummyDA := da.NewDummyDA(10_000_000, 10*time.Millisecond) // Mock InitChain to succeed initially mockExec.On("InitChain", mock.Anything, mock.Anything, mock.Anything, mock.Anything). diff --git a/block/internal/submitting/da_submitter.go b/block/internal/submitting/da_submitter.go index 5a8fabc167..f170244ea8 100644 --- a/block/internal/submitting/da_submitter.go +++ b/block/internal/submitting/da_submitter.go @@ -12,7 +12,7 @@ import ( "github.com/evstack/ev-node/block/internal/cache" "github.com/evstack/ev-node/block/internal/common" - coreda "github.com/evstack/ev-node/core/da" + dapkg "github.com/evstack/ev-node/da" "github.com/evstack/ev-node/pkg/config" pkgda "github.com/evstack/ev-node/pkg/da" "github.com/evstack/ev-node/pkg/genesis" @@ -94,7 +94,7 @@ func clamp(v, min, max time.Duration) time.Duration { // DASubmitter handles DA submission operations type DASubmitter struct { - da coreda.DA + da dapkg.DA config config.Config genesis genesis.Genesis options common.BlockOptions @@ -111,7 +111,7 @@ type DASubmitter struct { // NewDASubmitter creates a new DA submitter func NewDASubmitter( - da coreda.DA, + da dapkg.DA, config config.Config, genesis genesis.Genesis, options common.BlockOptions, @@ -148,8 +148,8 @@ func NewDASubmitter( options: options, metrics: metrics, logger: daSubmitterLogger, - namespaceBz: coreda.NamespaceFromString(config.DA.GetNamespace()).Bytes(), - namespaceDataBz: coreda.NamespaceFromString(config.DA.GetDataNamespace()).Bytes(), + namespaceBz: dapkg.NamespaceFromString(config.DA.GetNamespace()).Bytes(), + namespaceDataBz: dapkg.NamespaceFromString(config.DA.GetDataNamespace()).Bytes(), addressSelector: addressSelector, } } @@ -189,7 +189,7 @@ func (s *DASubmitter) SubmitHeaders(ctx context.Context, cache cache.Manager) er } return proto.Marshal(headerPb) }, - func(submitted []*types.SignedHeader, res *coreda.ResultSubmit) { + func(submitted []*types.SignedHeader, res *dapkg.ResultSubmit) { for _, header := range submitted { cache.SetHeaderDAIncluded(header.Hash().String(), res.Height, header.Height()) } @@ -232,7 +232,7 @@ func (s *DASubmitter) SubmitData(ctx context.Context, cache cache.Manager, signe func(signedData *types.SignedData) ([]byte, error) { return signedData.MarshalBinary() }, - func(submitted []*types.SignedData, res *coreda.ResultSubmit) { + func(submitted []*types.SignedData, res *dapkg.ResultSubmit) { for _, sd := range submitted { cache.SetDataDAIncluded(sd.Data.DACommitment().String(), res.Height, sd.Height()) } @@ -348,7 +348,7 @@ func submitToDA[T any]( ctx context.Context, items []T, marshalFn func(T) ([]byte, error), - postSubmit func([]T, *coreda.ResultSubmit), + postSubmit func([]T, *dapkg.ResultSubmit), itemType string, namespace []byte, options []byte, @@ -420,7 +420,7 @@ func submitToDA[T any]( } switch res.Code { - case coreda.StatusSuccess: + case dapkg.StatusSuccess: submitted := items[:res.SubmittedCount] postSubmit(submitted, &res) s.logger.Info().Str("itemType", itemType).Uint64("count", res.SubmittedCount).Msg("successfully submitted items to DA layer") @@ -441,7 +441,7 @@ func submitToDA[T any]( s.metrics.DASubmitterPendingBlobs.Set(float64(getTotalPendingFn())) } - case coreda.StatusTooBig: + case dapkg.StatusTooBig: // Record failure metric s.recordFailure(common.DASubmitterFailureReasonTooBig) // Iteratively halve until it fits or single-item too big @@ -465,19 +465,19 @@ func submitToDA[T any]( s.metrics.DASubmitterPendingBlobs.Set(float64(getTotalPendingFn())) } - case coreda.StatusNotIncludedInBlock: + case dapkg.StatusNotIncludedInBlock: // Record failure metric s.recordFailure(common.DASubmitterFailureReasonNotIncludedInBlock) s.logger.Info().Dur("backoff", pol.MaxBackoff).Msg("retrying due to mempool state") rs.Next(reasonMempool, pol) - case coreda.StatusAlreadyInMempool: + case dapkg.StatusAlreadyInMempool: // Record failure metric s.recordFailure(common.DASubmitterFailureReasonAlreadyInMempool) s.logger.Info().Dur("backoff", pol.MaxBackoff).Msg("retrying due to mempool state") rs.Next(reasonMempool, pol) - case coreda.StatusContextCanceled: + case dapkg.StatusContextCanceled: // Record failure metric s.recordFailure(common.DASubmitterFailureReasonContextCanceled) s.logger.Info().Msg("DA layer submission canceled due to context cancellation") diff --git a/block/internal/submitting/da_submitter_integration_test.go b/block/internal/submitting/da_submitter_integration_test.go index 421340e11d..4daaeee9dc 100644 --- a/block/internal/submitting/da_submitter_integration_test.go +++ b/block/internal/submitting/da_submitter_integration_test.go @@ -15,7 +15,7 @@ import ( "github.com/evstack/ev-node/block/internal/cache" "github.com/evstack/ev-node/block/internal/common" - coreda "github.com/evstack/ev-node/core/da" + da "github.com/evstack/ev-node/da" "github.com/evstack/ev-node/pkg/config" "github.com/evstack/ev-node/pkg/genesis" "github.com/evstack/ev-node/pkg/signer/noop" @@ -83,7 +83,7 @@ func TestDASubmitter_SubmitHeadersAndData_MarksInclusionAndUpdatesLastSubmitted( require.NoError(t, batch2.Commit()) // Dummy DA - dummyDA := coreda.NewDummyDA(10_000_000, 10*time.Millisecond) + dummyDA := da.NewDummyDA(10_000_000, 10*time.Millisecond) // Create DA submitter daSubmitter := NewDASubmitter(dummyDA, cfg, gen, common.DefaultBlockOptions(), common.NopMetrics(), zerolog.Nop()) diff --git a/block/internal/submitting/da_submitter_mocks_test.go b/block/internal/submitting/da_submitter_mocks_test.go index d914e6db61..7efce331d5 100644 --- a/block/internal/submitting/da_submitter_mocks_test.go +++ b/block/internal/submitting/da_submitter_mocks_test.go @@ -11,7 +11,7 @@ import ( "github.com/stretchr/testify/mock" "github.com/evstack/ev-node/block/internal/common" - coreda "github.com/evstack/ev-node/core/da" + da "github.com/evstack/ev-node/da" "github.com/evstack/ev-node/pkg/config" "github.com/evstack/ev-node/pkg/genesis" "github.com/evstack/ev-node/test/mocks" @@ -39,7 +39,7 @@ func TestSubmitToDA_MempoolRetry_IncreasesGasAndSucceeds(t *testing.T) { mockDA := mocks.NewMockDA(t) - nsBz := coreda.NamespaceFromString("ns").Bytes() + nsBz := da.NamespaceFromString("ns").Bytes() opts := []byte("opts") var usedGas []float64 mockDA. @@ -47,7 +47,7 @@ func TestSubmitToDA_MempoolRetry_IncreasesGasAndSucceeds(t *testing.T) { Run(func(args mock.Arguments) { usedGas = append(usedGas, args.Get(2).(float64)) }). - Return(nil, coreda.ErrTxTimedOut). + Return(nil, da.ErrTxTimedOut). Once() ids := [][]byte{[]byte("id1"), []byte("id2"), []byte("id3")} @@ -68,7 +68,7 @@ func TestSubmitToDA_MempoolRetry_IncreasesGasAndSucceeds(t *testing.T) { ctx, items, marshalString, - func(_ []string, _ *coreda.ResultSubmit) {}, + func(_ []string, _ *da.ResultSubmit) {}, "item", nsBz, opts, @@ -86,7 +86,7 @@ func TestSubmitToDA_UnknownError_RetriesSameGasThenSucceeds(t *testing.T) { mockDA := mocks.NewMockDA(t) - nsBz := coreda.NamespaceFromString("ns").Bytes() + nsBz := da.NamespaceFromString("ns").Bytes() opts := []byte("opts") var usedGas []float64 @@ -115,7 +115,7 @@ func TestSubmitToDA_UnknownError_RetriesSameGasThenSucceeds(t *testing.T) { ctx, items, marshalString, - func(_ []string, _ *coreda.ResultSubmit) {}, + func(_ []string, _ *da.ResultSubmit) {}, "item", nsBz, opts, @@ -131,7 +131,7 @@ func TestSubmitToDA_TooBig_HalvesBatch(t *testing.T) { mockDA := mocks.NewMockDA(t) - nsBz := coreda.NamespaceFromString("ns").Bytes() + nsBz := da.NamespaceFromString("ns").Bytes() opts := []byte("opts") // record sizes of batches sent to DA @@ -144,7 +144,7 @@ func TestSubmitToDA_TooBig_HalvesBatch(t *testing.T) { blobs := args.Get(1).([][]byte) batchSizes = append(batchSizes, len(blobs)) }). - Return(nil, coreda.ErrBlobSizeOverLimit). + Return(nil, da.ErrBlobSizeOverLimit). Once() // Second attempt: expect half the size, succeed @@ -167,7 +167,7 @@ func TestSubmitToDA_TooBig_HalvesBatch(t *testing.T) { ctx, items, marshalString, - func(_ []string, _ *coreda.ResultSubmit) {}, + func(_ []string, _ *da.ResultSubmit) {}, "item", nsBz, opts, @@ -183,7 +183,7 @@ func TestSubmitToDA_SentinelNoGas_PreservesGasAcrossRetries(t *testing.T) { mockDA := mocks.NewMockDA(t) - nsBz := coreda.NamespaceFromString("ns").Bytes() + nsBz := da.NamespaceFromString("ns").Bytes() opts := []byte("opts") var usedGas []float64 @@ -192,7 +192,7 @@ func TestSubmitToDA_SentinelNoGas_PreservesGasAcrossRetries(t *testing.T) { mockDA. On("SubmitWithOptions", mock.Anything, mock.Anything, mock.AnythingOfType("float64"), nsBz, opts). Run(func(args mock.Arguments) { usedGas = append(usedGas, args.Get(2).(float64)) }). - Return(nil, coreda.ErrTxAlreadyInMempool). + Return(nil, da.ErrTxAlreadyInMempool). Once() // Second attempt: should use same sentinel gas (-1), succeed @@ -212,7 +212,7 @@ func TestSubmitToDA_SentinelNoGas_PreservesGasAcrossRetries(t *testing.T) { ctx, items, marshalString, - func(_ []string, _ *coreda.ResultSubmit) {}, + func(_ []string, _ *da.ResultSubmit) {}, "item", nsBz, opts, @@ -228,7 +228,7 @@ func TestSubmitToDA_PartialSuccess_AdvancesWindow(t *testing.T) { mockDA := mocks.NewMockDA(t) - nsBz := coreda.NamespaceFromString("ns").Bytes() + nsBz := da.NamespaceFromString("ns").Bytes() opts := []byte("opts") // track how many items postSubmit sees across attempts @@ -251,7 +251,7 @@ func TestSubmitToDA_PartialSuccess_AdvancesWindow(t *testing.T) { ctx, items, marshalString, - func(submitted []string, _ *coreda.ResultSubmit) { totalSubmitted += len(submitted) }, + func(submitted []string, _ *da.ResultSubmit) { totalSubmitted += len(submitted) }, "item", nsBz, opts, diff --git a/block/internal/submitting/da_submitter_test.go b/block/internal/submitting/da_submitter_test.go index c657d8185b..afd7dba76e 100644 --- a/block/internal/submitting/da_submitter_test.go +++ b/block/internal/submitting/da_submitter_test.go @@ -15,7 +15,7 @@ import ( "github.com/evstack/ev-node/block/internal/cache" "github.com/evstack/ev-node/block/internal/common" - coreda "github.com/evstack/ev-node/core/da" + da "github.com/evstack/ev-node/da" "github.com/evstack/ev-node/pkg/config" "github.com/evstack/ev-node/pkg/genesis" "github.com/evstack/ev-node/pkg/rpc/server" @@ -25,7 +25,7 @@ import ( "github.com/evstack/ev-node/types" ) -func setupDASubmitterTest(t *testing.T) (*DASubmitter, store.Store, cache.Manager, coreda.DA, genesis.Genesis) { +func setupDASubmitterTest(t *testing.T) (*DASubmitter, store.Store, cache.Manager, da.DA, genesis.Genesis) { t.Helper() // Create store and cache @@ -35,7 +35,7 @@ func setupDASubmitterTest(t *testing.T) (*DASubmitter, store.Store, cache.Manage require.NoError(t, err) // Create dummy DA - dummyDA := coreda.NewDummyDA(10_000_000, 10*time.Millisecond) + dummyDA := da.NewDummyDA(10_000_000, 10*time.Millisecond) // Create config cfg := config.DefaultConfig() @@ -93,7 +93,7 @@ func TestNewDASubmitterSetsVisualizerWhenEnabled(t *testing.T) { cfg.RPC.EnableDAVisualization = true cfg.Node.Aggregator = true - dummyDA := coreda.NewDummyDA(10_000_000, 10*time.Millisecond) + dummyDA := da.NewDummyDA(10_000_000, 10*time.Millisecond) NewDASubmitter( dummyDA, diff --git a/block/internal/syncing/da_retriever.go b/block/internal/syncing/da_retriever.go index de67e1fd1c..cbadb60cfe 100644 --- a/block/internal/syncing/da_retriever.go +++ b/block/internal/syncing/da_retriever.go @@ -12,7 +12,7 @@ import ( "github.com/evstack/ev-node/block/internal/cache" "github.com/evstack/ev-node/block/internal/common" - coreda "github.com/evstack/ev-node/core/da" + dapkg "github.com/evstack/ev-node/da" "github.com/evstack/ev-node/pkg/config" "github.com/evstack/ev-node/pkg/genesis" "github.com/evstack/ev-node/types" @@ -24,7 +24,7 @@ const defaultDATimeout = 10 * time.Second // DARetriever handles DA retrieval operations for syncing type DARetriever struct { - da coreda.DA + da dapkg.DA cache cache.Manager genesis genesis.Genesis logger zerolog.Logger @@ -41,7 +41,7 @@ type DARetriever struct { // NewDARetriever creates a new DA retriever func NewDARetriever( - da coreda.DA, + da dapkg.DA, cache cache.Manager, config config.Config, genesis genesis.Genesis, @@ -52,8 +52,8 @@ func NewDARetriever( cache: cache, genesis: genesis, logger: logger.With().Str("component", "da_retriever").Logger(), - namespaceBz: coreda.NamespaceFromString(config.DA.GetNamespace()).Bytes(), - namespaceDataBz: coreda.NamespaceFromString(config.DA.GetDataNamespace()).Bytes(), + namespaceBz: dapkg.NamespaceFromString(config.DA.GetNamespace()).Bytes(), + namespaceDataBz: dapkg.NamespaceFromString(config.DA.GetDataNamespace()).Bytes(), pendingHeaders: make(map[uint64]*types.SignedHeader), pendingData: make(map[uint64]*types.Data), } @@ -77,7 +77,7 @@ func (r *DARetriever) RetrieveFromDA(ctx context.Context, daHeight uint64) ([]co } // fetchBlobs retrieves blobs from the DA layer -func (r *DARetriever) fetchBlobs(ctx context.Context, daHeight uint64) (coreda.ResultRetrieve, error) { +func (r *DARetriever) fetchBlobs(ctx context.Context, daHeight uint64) (dapkg.ResultRetrieve, error) { // Retrieve from both namespaces headerRes := types.RetrieveWithHelpers(ctx, r.da, r.logger, daHeight, r.namespaceBz, defaultDATimeout) @@ -91,31 +91,31 @@ func (r *DARetriever) fetchBlobs(ctx context.Context, daHeight uint64) (coreda.R // Validate responses headerErr := r.validateBlobResponse(headerRes, daHeight) // ignoring error not found, as data can have data - if headerErr != nil && !errors.Is(headerErr, coreda.ErrBlobNotFound) { + if headerErr != nil && !errors.Is(headerErr, dapkg.ErrBlobNotFound) { return headerRes, headerErr } dataErr := r.validateBlobResponse(dataRes, daHeight) // ignoring error not found, as header can have data - if dataErr != nil && !errors.Is(dataErr, coreda.ErrBlobNotFound) { + if dataErr != nil && !errors.Is(dataErr, dapkg.ErrBlobNotFound) { return dataRes, dataErr } // Combine successful results - combinedResult := coreda.ResultRetrieve{ - BaseResult: coreda.BaseResult{ - Code: coreda.StatusSuccess, + combinedResult := dapkg.ResultRetrieve{ + BaseResult: dapkg.BaseResult{ + Code: dapkg.StatusSuccess, Height: daHeight, }, Data: make([][]byte, 0), } - if headerRes.Code == coreda.StatusSuccess { + if headerRes.Code == dapkg.StatusSuccess { combinedResult.Data = append(combinedResult.Data, headerRes.Data...) combinedResult.IDs = append(combinedResult.IDs, headerRes.IDs...) } - if dataRes.Code == coreda.StatusSuccess { + if dataRes.Code == dapkg.StatusSuccess { combinedResult.Data = append(combinedResult.Data, dataRes.Data...) combinedResult.IDs = append(combinedResult.IDs, dataRes.IDs...) } @@ -123,25 +123,25 @@ func (r *DARetriever) fetchBlobs(ctx context.Context, daHeight uint64) (coreda.R // Re-throw error not found if both were not found. if len(combinedResult.Data) == 0 && len(combinedResult.IDs) == 0 { r.logger.Debug().Uint64("da_height", daHeight).Msg("no blob data found") - combinedResult.Code = coreda.StatusNotFound - combinedResult.Message = coreda.ErrBlobNotFound.Error() - return combinedResult, coreda.ErrBlobNotFound + combinedResult.Code = dapkg.StatusNotFound + combinedResult.Message = dapkg.ErrBlobNotFound.Error() + return combinedResult, dapkg.ErrBlobNotFound } return combinedResult, nil } // validateBlobResponse validates a blob response from DA layer -// those are the only error code returned by da.RetrieveWithHelpers -func (r *DARetriever) validateBlobResponse(res coreda.ResultRetrieve, daHeight uint64) error { +// those are the only error code returned by dapkg.RetrieveWithHelpers +func (r *DARetriever) validateBlobResponse(res dapkg.ResultRetrieve, daHeight uint64) error { switch res.Code { - case coreda.StatusError: + case dapkg.StatusError: return fmt.Errorf("DA retrieval failed: %s", res.Message) - case coreda.StatusHeightFromFuture: - return fmt.Errorf("%w: height from future", coreda.ErrHeightFromFuture) - case coreda.StatusNotFound: - return fmt.Errorf("%w: blob not found", coreda.ErrBlobNotFound) - case coreda.StatusSuccess: + case dapkg.StatusHeightFromFuture: + return fmt.Errorf("%w: height from future", dapkg.ErrHeightFromFuture) + case dapkg.StatusNotFound: + return fmt.Errorf("%w: blob not found", dapkg.ErrBlobNotFound) + case dapkg.StatusSuccess: r.logger.Debug().Uint64("da_height", daHeight).Msg("successfully retrieved from DA") return nil default: diff --git a/block/internal/syncing/da_retriever_test.go b/block/internal/syncing/da_retriever_test.go index c6e8daa78f..ee514255ea 100644 --- a/block/internal/syncing/da_retriever_test.go +++ b/block/internal/syncing/da_retriever_test.go @@ -18,7 +18,7 @@ import ( "github.com/evstack/ev-node/block/internal/cache" "github.com/evstack/ev-node/block/internal/common" - coreda "github.com/evstack/ev-node/core/da" + da "github.com/evstack/ev-node/da" "github.com/evstack/ev-node/pkg/config" "github.com/evstack/ev-node/pkg/genesis" signerpkg "github.com/evstack/ev-node/pkg/signer" @@ -75,11 +75,11 @@ func TestDARetriever_RetrieveFromDA_NotFound(t *testing.T) { // GetIDs returns ErrBlobNotFound -> helper maps to StatusNotFound mockDA.EXPECT().GetIDs(mock.Anything, mock.Anything, mock.Anything). - Return(nil, fmt.Errorf("%s: whatever", coreda.ErrBlobNotFound.Error())).Maybe() + Return(nil, fmt.Errorf("%s: whatever", da.ErrBlobNotFound.Error())).Maybe() r := NewDARetriever(mockDA, cm, config.DefaultConfig(), genesis.Genesis{}, zerolog.Nop()) events, err := r.RetrieveFromDA(context.Background(), 42) - assert.True(t, errors.Is(err, coreda.ErrBlobNotFound)) + assert.True(t, errors.Is(err, da.ErrBlobNotFound)) assert.Len(t, events, 0) } @@ -92,12 +92,12 @@ func TestDARetriever_RetrieveFromDA_HeightFromFuture(t *testing.T) { mockDA := testmocks.NewMockDA(t) // GetIDs returns ErrHeightFromFuture -> helper maps to StatusHeightFromFuture, fetchBlobs returns error mockDA.EXPECT().GetIDs(mock.Anything, mock.Anything, mock.Anything). - Return(nil, fmt.Errorf("%s: later", coreda.ErrHeightFromFuture.Error())).Maybe() + Return(nil, fmt.Errorf("%s: later", da.ErrHeightFromFuture.Error())).Maybe() r := NewDARetriever(mockDA, cm, config.DefaultConfig(), genesis.Genesis{}, zerolog.Nop()) events, derr := r.RetrieveFromDA(context.Background(), 1000) assert.Error(t, derr) - assert.True(t, errors.Is(derr, coreda.ErrHeightFromFuture)) + assert.True(t, errors.Is(derr, da.ErrHeightFromFuture)) assert.Nil(t, events) } @@ -257,15 +257,15 @@ func TestDARetriever_tryDecodeData_InvalidSignatureOrProposer(t *testing.T) { func TestDARetriever_validateBlobResponse(t *testing.T) { r := &DARetriever{logger: zerolog.Nop()} // StatusSuccess -> nil - err := r.validateBlobResponse(coreda.ResultRetrieve{BaseResult: coreda.BaseResult{Code: coreda.StatusSuccess}}, 1) + err := r.validateBlobResponse(da.ResultRetrieve{BaseResult: da.BaseResult{Code: da.StatusSuccess}}, 1) assert.NoError(t, err) // StatusError -> error - err = r.validateBlobResponse(coreda.ResultRetrieve{BaseResult: coreda.BaseResult{Code: coreda.StatusError, Message: "fail"}}, 1) + err = r.validateBlobResponse(da.ResultRetrieve{BaseResult: da.BaseResult{Code: da.StatusError, Message: "fail"}}, 1) assert.Error(t, err) // StatusHeightFromFuture -> specific error - err = r.validateBlobResponse(coreda.ResultRetrieve{BaseResult: coreda.BaseResult{Code: coreda.StatusHeightFromFuture}}, 1) + err = r.validateBlobResponse(da.ResultRetrieve{BaseResult: da.BaseResult{Code: da.StatusHeightFromFuture}}, 1) assert.Error(t, err) - assert.True(t, errors.Is(err, coreda.ErrHeightFromFuture)) + assert.True(t, errors.Is(err, da.ErrHeightFromFuture)) } func TestDARetriever_RetrieveFromDA_TwoNamespaces_Success(t *testing.T) { @@ -285,18 +285,18 @@ func TestDARetriever_RetrieveFromDA_TwoNamespaces_Success(t *testing.T) { cfg.DA.Namespace = "nsHdr" cfg.DA.DataNamespace = "nsData" - namespaceBz := coreda.NamespaceFromString(cfg.DA.GetNamespace()).Bytes() - namespaceDataBz := coreda.NamespaceFromString(cfg.DA.GetDataNamespace()).Bytes() + namespaceBz := da.NamespaceFromString(cfg.DA.GetNamespace()).Bytes() + namespaceDataBz := da.NamespaceFromString(cfg.DA.GetDataNamespace()).Bytes() mockDA := testmocks.NewMockDA(t) // Expect GetIDs for both namespaces mockDA.EXPECT().GetIDs(mock.Anything, uint64(1234), mock.MatchedBy(func(ns []byte) bool { return bytes.Equal(ns, namespaceBz) })). - Return(&coreda.GetIDsResult{IDs: [][]byte{[]byte("h1")}, Timestamp: time.Now()}, nil).Once() + Return(&da.GetIDsResult{IDs: [][]byte{[]byte("h1")}, Timestamp: time.Now()}, nil).Once() mockDA.EXPECT().Get(mock.Anything, mock.Anything, mock.MatchedBy(func(ns []byte) bool { return bytes.Equal(ns, namespaceBz) })). Return([][]byte{hdrBin}, nil).Once() mockDA.EXPECT().GetIDs(mock.Anything, uint64(1234), mock.MatchedBy(func(ns []byte) bool { return bytes.Equal(ns, namespaceDataBz) })). - Return(&coreda.GetIDsResult{IDs: [][]byte{[]byte("d1")}, Timestamp: time.Now()}, nil).Once() + Return(&da.GetIDsResult{IDs: [][]byte{[]byte("d1")}, Timestamp: time.Now()}, nil).Once() mockDA.EXPECT().Get(mock.Anything, mock.Anything, mock.MatchedBy(func(ns []byte) bool { return bytes.Equal(ns, namespaceDataBz) })). Return([][]byte{dataBin}, nil).Once() diff --git a/block/internal/syncing/syncer.go b/block/internal/syncing/syncer.go index d34dceca51..01900c49a6 100644 --- a/block/internal/syncing/syncer.go +++ b/block/internal/syncing/syncer.go @@ -13,7 +13,7 @@ import ( "github.com/rs/zerolog" "golang.org/x/sync/errgroup" - coreda "github.com/evstack/ev-node/core/da" + da "github.com/evstack/ev-node/da" coreexecutor "github.com/evstack/ev-node/core/execution" "github.com/evstack/ev-node/block/internal/cache" @@ -38,7 +38,7 @@ type Syncer struct { // Core components store store.Store exec coreexecutor.Executor - da coreda.DA + da da.DA // Shared components cache cache.Manager @@ -83,7 +83,7 @@ type Syncer struct { func NewSyncer( store store.Store, exec coreexecutor.Executor, - da coreda.DA, + da da.DA, cache cache.Manager, metrics *common.Metrics, config config.Config, @@ -306,10 +306,10 @@ func (s *Syncer) fetchDAUntilCaughtUp() error { events, err := s.daRetriever.RetrieveFromDA(ctx, daHeight) if err != nil { switch { - case errors.Is(err, coreda.ErrBlobNotFound): + case errors.Is(err, da.ErrBlobNotFound): s.SetDAHeight(daHeight + 1) continue // Fetch next height immediately - case errors.Is(err, coreda.ErrHeightFromFuture): + case errors.Is(err, da.ErrHeightFromFuture): s.logger.Debug().Err(err).Uint64("da_height", daHeight).Msg("DA is ahead of local target; backing off future height requests") return nil // Caught up default: diff --git a/block/internal/syncing/syncer_backoff_test.go b/block/internal/syncing/syncer_backoff_test.go index 9d58d226ea..732e6e6aba 100644 --- a/block/internal/syncing/syncer_backoff_test.go +++ b/block/internal/syncing/syncer_backoff_test.go @@ -15,7 +15,7 @@ import ( "github.com/evstack/ev-node/block/internal/cache" "github.com/evstack/ev-node/block/internal/common" - coreda "github.com/evstack/ev-node/core/da" + da "github.com/evstack/ev-node/da" "github.com/evstack/ev-node/core/execution" "github.com/evstack/ev-node/pkg/config" "github.com/evstack/ev-node/pkg/genesis" @@ -41,13 +41,13 @@ func TestSyncer_BackoffOnDAError(t *testing.T) { }, "height_from_future_triggers_backoff": { daBlockTime: 500 * time.Millisecond, - error: coreda.ErrHeightFromFuture, + error: da.ErrHeightFromFuture, expectsBackoff: true, description: "Height from future should trigger backoff", }, "blob_not_found_no_backoff": { daBlockTime: 1 * time.Second, - error: coreda.ErrBlobNotFound, + error: da.ErrBlobNotFound, expectsBackoff: false, description: "ErrBlobNotFound should not trigger backoff", }, @@ -111,7 +111,7 @@ func TestSyncer_BackoffOnDAError(t *testing.T) { // Cancel to end test cancel() }). - Return(nil, coreda.ErrBlobNotFound).Once() + Return(nil, da.ErrBlobNotFound).Once() } else { // For ErrBlobNotFound, DA height should increment daRetriever.On("RetrieveFromDA", mock.Anything, uint64(101)). @@ -120,7 +120,7 @@ func TestSyncer_BackoffOnDAError(t *testing.T) { callCount++ cancel() }). - Return(nil, coreda.ErrBlobNotFound).Once() + Return(nil, da.ErrBlobNotFound).Once() } // Run sync loop @@ -223,7 +223,7 @@ func TestSyncer_BackoffResetOnSuccess(t *testing.T) { callTimes = append(callTimes, time.Now()) cancel() }). - Return(nil, coreda.ErrBlobNotFound).Once() + Return(nil, da.ErrBlobNotFound).Once() // Start process loop to handle events go syncer.processLoop() @@ -292,7 +292,7 @@ func TestSyncer_BackoffBehaviorIntegration(t *testing.T) { Run(func(args mock.Arguments) { callTimes = append(callTimes, time.Now()) }). - Return(nil, coreda.ErrBlobNotFound).Once() + Return(nil, da.ErrBlobNotFound).Once() // Third call - should continue without delay (DA height incremented) daRetriever.On("RetrieveFromDA", mock.Anything, uint64(101)). @@ -300,7 +300,7 @@ func TestSyncer_BackoffBehaviorIntegration(t *testing.T) { callTimes = append(callTimes, time.Now()) cancel() }). - Return(nil, coreda.ErrBlobNotFound).Once() + Return(nil, da.ErrBlobNotFound).Once() go syncer.processLoop() syncer.startSyncWorkers() diff --git a/block/internal/syncing/syncer_test.go b/block/internal/syncing/syncer_test.go index 93e7ae38b7..696f917d3d 100644 --- a/block/internal/syncing/syncer_test.go +++ b/block/internal/syncing/syncer_test.go @@ -9,7 +9,7 @@ import ( "testing" "time" - coreda "github.com/evstack/ev-node/core/da" + da "github.com/evstack/ev-node/da" "github.com/evstack/ev-node/core/execution" "github.com/evstack/ev-node/pkg/genesis" signerpkg "github.com/evstack/ev-node/pkg/signer" @@ -424,7 +424,7 @@ func TestSyncLoopPersistState(t *testing.T) { }, 1*time.Second, 10*time.Millisecond) cancel() }). - Return(nil, coreda.ErrHeightFromFuture) + Return(nil, da.ErrHeightFromFuture) go syncerInst1.processLoop() syncerInst1.startSyncWorkers() diff --git a/core/da/da.go b/core/da/da.go deleted file mode 100644 index 4229f99879..0000000000 --- a/core/da/da.go +++ /dev/null @@ -1,126 +0,0 @@ -package da - -import ( - "context" - "encoding/binary" - "fmt" - "time" -) - -// DA defines very generic interface for interaction with Data Availability layers. -type DA interface { - // Get returns Blob for each given ID, or an error. - // - // Error should be returned if ID is not formatted properly, there is no Blob for given ID or any other client-level - // error occurred (dropped connection, timeout, etc). - Get(ctx context.Context, ids []ID, namespace []byte) ([]Blob, error) - - // GetIDs returns IDs of all Blobs located in DA at given height. - GetIDs(ctx context.Context, height uint64, namespace []byte) (*GetIDsResult, error) - - // GetProofs returns inclusion Proofs for Blobs specified by their IDs. - GetProofs(ctx context.Context, ids []ID, namespace []byte) ([]Proof, error) - - // Commit creates a Commitment for each given Blob. - Commit(ctx context.Context, blobs []Blob, namespace []byte) ([]Commitment, error) - - // Submit submits the Blobs to Data Availability layer. - // - // This method is synchronous. Upon successful submission to Data Availability layer, it returns the IDs identifying blobs - // in DA. - Submit(ctx context.Context, blobs []Blob, gasPrice float64, namespace []byte) ([]ID, error) - - // SubmitWithOptions submits the Blobs to Data Availability layer with additional options. - SubmitWithOptions(ctx context.Context, blobs []Blob, gasPrice float64, namespace []byte, options []byte) ([]ID, error) - - // Validate validates Commitments against the corresponding Proofs. This should be possible without retrieving the Blobs. - Validate(ctx context.Context, ids []ID, proofs []Proof, namespace []byte) ([]bool, error) -} - -// Blob is the data submitted/received from DA interface. -type Blob = []byte - -// ID should contain serialized data required by the implementation to find blob in Data Availability layer. -type ID = []byte - -// Commitment should contain serialized cryptographic commitment to Blob value. -type Commitment = []byte - -// Proof should contain serialized proof of inclusion (publication) of Blob in Data Availability layer. -type Proof = []byte - -// GetIDsResult holds the result of GetIDs call: IDs and timestamp of corresponding block. -type GetIDsResult struct { - IDs []ID - Timestamp time.Time -} - -// ResultSubmit contains information returned from DA layer after block headers/data submission. -type ResultSubmit struct { - BaseResult -} - -// ResultRetrieveHeaders contains batch of block headers returned from DA layer client. -type ResultRetrieve struct { - BaseResult - // Data is the block data retrieved from Data Availability Layer. - // If Code is not equal to StatusSuccess, it has to be nil. - Data [][]byte -} - -// StatusCode is a type for DA layer return status. -// TODO: define an enum of different non-happy-path cases -// that might need to be handled by Evolve independent of -// the underlying DA chain. -type StatusCode uint64 - -// Data Availability return codes. -const ( - StatusUnknown StatusCode = iota - StatusSuccess - StatusNotFound - StatusNotIncludedInBlock - StatusAlreadyInMempool - StatusTooBig - StatusContextDeadline - StatusError - StatusIncorrectAccountSequence - StatusContextCanceled - StatusHeightFromFuture -) - -// BaseResult contains basic information returned by DA layer. -type BaseResult struct { - // Code is to determine if the action succeeded. - Code StatusCode - // Message may contain DA layer specific information (like DA block height/hash, detailed error message, etc) - Message string - // Height is the height of the block on Data Availability Layer for given result. - Height uint64 - // SubmittedCount is the number of successfully submitted blocks. - SubmittedCount uint64 - // BlobSize is the size of the blob submitted. - BlobSize uint64 - // IDs is the list of IDs of the blobs submitted. - IDs [][]byte - // Timestamp is the timestamp of the posted data on Data Availability Layer. - Timestamp time.Time -} - -// makeID creates an ID from a height and a commitment. -func makeID(height uint64, commitment []byte) []byte { - id := make([]byte, len(commitment)+8) - binary.LittleEndian.PutUint64(id, height) - copy(id[8:], commitment) - return id -} - -// SplitID splits an ID into a height and a commitment. -// if len(id) <= 8, it returns 0 and nil. -func SplitID(id []byte) (uint64, []byte, error) { - if len(id) <= 8 { - return 0, nil, fmt.Errorf("invalid ID length: %d", len(id)) - } - commitment := id[8:] - return binary.LittleEndian.Uint64(id[:8]), commitment, nil -} diff --git a/core/da/errors.go b/core/da/errors.go deleted file mode 100644 index beac62be54..0000000000 --- a/core/da/errors.go +++ /dev/null @@ -1,16 +0,0 @@ -package da - -import ( - "errors" -) - -var ( - ErrBlobNotFound = errors.New("blob: not found") - ErrBlobSizeOverLimit = errors.New("blob: over size limit") - ErrTxTimedOut = errors.New("timed out waiting for tx to be included in a block") - ErrTxAlreadyInMempool = errors.New("tx already in mempool") - ErrTxIncorrectAccountSequence = errors.New("incorrect account sequence") - ErrContextDeadline = errors.New("context deadline") - ErrHeightFromFuture = errors.New("given height is from the future") - ErrContextCanceled = errors.New("context canceled") -) diff --git a/core/da/namespace.go b/core/da/namespace.go deleted file mode 100644 index 057bb29365..0000000000 --- a/core/da/namespace.go +++ /dev/null @@ -1,129 +0,0 @@ -package da - -import ( - "crypto/sha256" - "encoding/hex" - "fmt" - "strings" -) - -// Implemented in accordance to https://celestiaorg.github.io/celestia-app/namespace.html - -const ( - // NamespaceVersionIndex is the index of the namespace version in the byte slice - NamespaceVersionIndex = 0 - // NamespaceVersionSize is the size of the namespace version in bytes - NamespaceVersionSize = 1 - // NamespaceIDSize is the size of the namespace ID in bytes - NamespaceIDSize = 28 - // NamespaceSize is the total size of a namespace (version + ID) in bytes - NamespaceSize = NamespaceVersionSize + NamespaceIDSize - - // NamespaceVersionZero is the only supported user-specifiable namespace version - NamespaceVersionZero = uint8(0) - // NamespaceVersionMax is the max namespace version - NamespaceVersionMax = uint8(255) - - // NamespaceVersionZeroPrefixSize is the number of leading zero bytes required for version 0 - NamespaceVersionZeroPrefixSize = 18 - // NamespaceVersionZeroDataSize is the number of data bytes available for version 0 - NamespaceVersionZeroDataSize = 10 -) - -// Namespace represents a Celestia namespace -type Namespace struct { - Version uint8 - ID [NamespaceIDSize]byte -} - -// Bytes returns the namespace as a byte slice -func (n Namespace) Bytes() []byte { - result := make([]byte, NamespaceSize) - result[NamespaceVersionIndex] = n.Version - copy(result[NamespaceVersionSize:], n.ID[:]) - return result -} - -// IsValidForVersion0 checks if the namespace is valid for version 0 -// Version 0 requires the first 18 bytes of the ID to be zero -func (n Namespace) IsValidForVersion0() bool { - if n.Version != NamespaceVersionZero { - return false - } - - for i := range NamespaceVersionZeroPrefixSize { - if n.ID[i] != 0 { - return false - } - } - return true -} - -// NewNamespaceV0 creates a new version 0 namespace from the provided data -// The data should be up to 10 bytes and will be placed in the last 10 bytes of the ID -// The first 18 bytes will be zeros as required by the specification -func NewNamespaceV0(data []byte) (*Namespace, error) { - if len(data) > NamespaceVersionZeroDataSize { - return nil, fmt.Errorf("data too long for version 0 namespace: got %d bytes, max %d", - len(data), NamespaceVersionZeroDataSize) - } - - ns := &Namespace{ - Version: NamespaceVersionZero, - } - - // The first 18 bytes are already zero (Go zero-initializes) - // Copy the data to the last 10 bytes - copy(ns.ID[NamespaceVersionZeroPrefixSize:], data) - - return ns, nil -} - -// NamespaceFromBytes creates a namespace from a 29-byte slice -func NamespaceFromBytes(b []byte) (*Namespace, error) { - if len(b) != NamespaceSize { - return nil, fmt.Errorf("invalid namespace size: expected %d, got %d", NamespaceSize, len(b)) - } - - ns := &Namespace{ - Version: b[NamespaceVersionIndex], - } - copy(ns.ID[:], b[NamespaceVersionSize:]) - - // Validate if it's version 0 - if ns.Version == NamespaceVersionZero && !ns.IsValidForVersion0() { - return nil, fmt.Errorf("invalid version 0 namespace: first %d bytes of ID must be zero", - NamespaceVersionZeroPrefixSize) - } - - return ns, nil -} - -// NamespaceFromString creates a version 0 namespace from a string identifier -// The string is hashed and the first 10 bytes of the hash are used as the namespace data -func NamespaceFromString(s string) *Namespace { - // Hash the string to get consistent bytes - hash := sha256.Sum256([]byte(s)) - - // Use the first 10 bytes of the hash for the namespace data - ns, _ := NewNamespaceV0(hash[:NamespaceVersionZeroDataSize]) - return ns -} - -// HexString returns the hex representation of the namespace -func (n Namespace) HexString() string { - return "0x" + hex.EncodeToString(n.Bytes()) -} - -// ParseHexNamespace parses a hex string into a namespace -func ParseHexNamespace(hexStr string) (*Namespace, error) { - // Remove 0x prefix if present - hexStr = strings.TrimPrefix(hexStr, "0x") - - b, err := hex.DecodeString(hexStr) - if err != nil { - return nil, fmt.Errorf("invalid hex string: %w", err) - } - - return NamespaceFromBytes(b) -} diff --git a/da/celestia/adapter.go b/da/celestia/adapter.go index 472df22283..b1bd8ac6ae 100644 --- a/da/celestia/adapter.go +++ b/da/celestia/adapter.go @@ -8,7 +8,7 @@ import ( "strings" "time" - "github.com/evstack/ev-node/core/da" + "github.com/evstack/ev-node/da" "github.com/rs/zerolog" ) diff --git a/da/celestia/adapter_test.go b/da/celestia/adapter_test.go index 795d4a2179..996a98adde 100644 --- a/da/celestia/adapter_test.go +++ b/da/celestia/adapter_test.go @@ -4,7 +4,7 @@ import ( "context" "testing" - "github.com/evstack/ev-node/core/da" + "github.com/evstack/ev-node/da" "github.com/rs/zerolog" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" diff --git a/da/cmd/local-da/local.go b/da/cmd/local-da/local.go index 4117419aa7..fdc954e3ca 100644 --- a/da/cmd/local-da/local.go +++ b/da/cmd/local-da/local.go @@ -13,7 +13,7 @@ import ( "sync" "time" - coreda "github.com/evstack/ev-node/core/da" + da "github.com/evstack/ev-node/da" "github.com/rs/zerolog" ) @@ -57,7 +57,7 @@ func NewLocalDA(logger zerolog.Logger, opts ...func(*LocalDA) *LocalDA) *LocalDA return da } -var _ coreda.DA = &LocalDA{} +var _ da.DA = &LocalDA{} // validateNamespace checks that namespace is exactly 29 bytes func validateNamespace(ns []byte) error { @@ -74,7 +74,7 @@ func (d *LocalDA) MaxBlobSize(ctx context.Context) (uint64, error) { } // Get returns Blobs for given IDs. -func (d *LocalDA) Get(ctx context.Context, ids []coreda.ID, ns []byte) ([]coreda.Blob, error) { +func (d *LocalDA) Get(ctx context.Context, ids []da.ID, ns []byte) ([]da.Blob, error) { if err := validateNamespace(ns); err != nil { d.logger.Error().Err(err).Msg("Get: invalid namespace") return nil, err @@ -82,7 +82,7 @@ func (d *LocalDA) Get(ctx context.Context, ids []coreda.ID, ns []byte) ([]coreda d.logger.Debug().Interface("ids", ids).Msg("Get called") d.mu.Lock() defer d.mu.Unlock() - blobs := make([]coreda.Blob, len(ids)) + blobs := make([]da.Blob, len(ids)) for i, id := range ids { if len(id) < 8 { d.logger.Error().Interface("id", id).Msg("Get: invalid ID length") @@ -98,7 +98,7 @@ func (d *LocalDA) Get(ctx context.Context, ids []coreda.ID, ns []byte) ([]coreda } if !found { d.logger.Warn().Interface("id", id).Uint64("height", height).Msg("Get: blob not found") - return nil, coreda.ErrBlobNotFound + return nil, da.ErrBlobNotFound } } d.logger.Debug().Int("count", len(blobs)).Msg("Get successful") @@ -106,7 +106,7 @@ func (d *LocalDA) Get(ctx context.Context, ids []coreda.ID, ns []byte) ([]coreda } // GetIDs returns IDs of Blobs at given DA height. -func (d *LocalDA) GetIDs(ctx context.Context, height uint64, ns []byte) (*coreda.GetIDsResult, error) { +func (d *LocalDA) GetIDs(ctx context.Context, height uint64, ns []byte) (*da.GetIDsResult, error) { if err := validateNamespace(ns); err != nil { d.logger.Error().Err(err).Msg("GetIDs: invalid namespace") return nil, err @@ -117,7 +117,7 @@ func (d *LocalDA) GetIDs(ctx context.Context, height uint64, ns []byte) (*coreda if height > d.height { d.logger.Error().Uint64("requested", height).Uint64("current", d.height).Msg("GetIDs: height in future") - return nil, fmt.Errorf("height %d is in the future: %w", height, coreda.ErrHeightFromFuture) + return nil, fmt.Errorf("height %d is in the future: %w", height, da.ErrHeightFromFuture) } kvps, ok := d.data[height] @@ -126,16 +126,16 @@ func (d *LocalDA) GetIDs(ctx context.Context, height uint64, ns []byte) (*coreda return nil, nil } - ids := make([]coreda.ID, len(kvps)) + ids := make([]da.ID, len(kvps)) for i, kv := range kvps { ids[i] = kv.key } d.logger.Debug().Int("count", len(ids)).Msg("GetIDs successful") - return &coreda.GetIDsResult{IDs: ids, Timestamp: d.timestamps[height]}, nil + return &da.GetIDsResult{IDs: ids, Timestamp: d.timestamps[height]}, nil } // GetProofs returns inclusion Proofs for all Blobs located in DA at given height. -func (d *LocalDA) GetProofs(ctx context.Context, ids []coreda.ID, ns []byte) ([]coreda.Proof, error) { +func (d *LocalDA) GetProofs(ctx context.Context, ids []da.ID, ns []byte) ([]da.Proof, error) { if err := validateNamespace(ns); err != nil { d.logger.Error().Err(err).Msg("GetProofs: invalid namespace") return nil, err @@ -149,7 +149,7 @@ func (d *LocalDA) GetProofs(ctx context.Context, ids []coreda.ID, ns []byte) ([] d.mu.Lock() defer d.mu.Unlock() - proofs := make([]coreda.Proof, len(blobs)) + proofs := make([]da.Proof, len(blobs)) for i, blob := range blobs { proofs[i] = d.getProof(ids[i], blob) } @@ -158,13 +158,13 @@ func (d *LocalDA) GetProofs(ctx context.Context, ids []coreda.ID, ns []byte) ([] } // Commit returns cryptographic Commitments for given blobs. -func (d *LocalDA) Commit(ctx context.Context, blobs []coreda.Blob, ns []byte) ([]coreda.Commitment, error) { +func (d *LocalDA) Commit(ctx context.Context, blobs []da.Blob, ns []byte) ([]da.Commitment, error) { if err := validateNamespace(ns); err != nil { d.logger.Error().Err(err).Msg("Commit: invalid namespace") return nil, err } d.logger.Debug().Int("numBlobs", len(blobs)).Msg("Commit called") - commits := make([]coreda.Commitment, len(blobs)) + commits := make([]da.Commitment, len(blobs)) for i, blob := range blobs { commits[i] = d.getHash(blob) } @@ -173,7 +173,7 @@ func (d *LocalDA) Commit(ctx context.Context, blobs []coreda.Blob, ns []byte) ([ } // SubmitWithOptions stores blobs in DA layer (options are ignored). -func (d *LocalDA) SubmitWithOptions(ctx context.Context, blobs []coreda.Blob, gasPrice float64, ns []byte, _ []byte) ([]coreda.ID, error) { +func (d *LocalDA) SubmitWithOptions(ctx context.Context, blobs []da.Blob, gasPrice float64, ns []byte, _ []byte) ([]da.ID, error) { if err := validateNamespace(ns); err != nil { d.logger.Error().Err(err).Msg("SubmitWithOptions: invalid namespace") return nil, err @@ -184,13 +184,13 @@ func (d *LocalDA) SubmitWithOptions(ctx context.Context, blobs []coreda.Blob, ga for i, blob := range blobs { if uint64(len(blob)) > d.maxBlobSize { d.logger.Error().Int("blobIndex", i).Int("blobSize", len(blob)).Uint64("maxBlobSize", d.maxBlobSize).Msg("SubmitWithOptions: blob size exceeds limit") - return nil, coreda.ErrBlobSizeOverLimit + return nil, da.ErrBlobSizeOverLimit } } d.mu.Lock() defer d.mu.Unlock() - ids := make([]coreda.ID, len(blobs)) + ids := make([]da.ID, len(blobs)) d.height += 1 d.timestamps[d.height] = time.Now() for i, blob := range blobs { @@ -203,7 +203,7 @@ func (d *LocalDA) SubmitWithOptions(ctx context.Context, blobs []coreda.Blob, ga } // Submit stores blobs in DA layer (options are ignored). -func (d *LocalDA) Submit(ctx context.Context, blobs []coreda.Blob, gasPrice float64, ns []byte) ([]coreda.ID, error) { +func (d *LocalDA) Submit(ctx context.Context, blobs []da.Blob, gasPrice float64, ns []byte) ([]da.ID, error) { if err := validateNamespace(ns); err != nil { d.logger.Error().Err(err).Msg("Submit: invalid namespace") return nil, err @@ -214,13 +214,13 @@ func (d *LocalDA) Submit(ctx context.Context, blobs []coreda.Blob, gasPrice floa for i, blob := range blobs { if uint64(len(blob)) > d.maxBlobSize { d.logger.Error().Int("blobIndex", i).Int("blobSize", len(blob)).Uint64("maxBlobSize", d.maxBlobSize).Msg("Submit: blob size exceeds limit") - return nil, coreda.ErrBlobSizeOverLimit + return nil, da.ErrBlobSizeOverLimit } } d.mu.Lock() defer d.mu.Unlock() - ids := make([]coreda.ID, len(blobs)) + ids := make([]da.ID, len(blobs)) d.height += 1 d.timestamps[d.height] = time.Now() for i, blob := range blobs { @@ -233,7 +233,7 @@ func (d *LocalDA) Submit(ctx context.Context, blobs []coreda.Blob, gasPrice floa } // Validate checks the Proofs for given IDs. -func (d *LocalDA) Validate(ctx context.Context, ids []coreda.ID, proofs []coreda.Proof, ns []byte) ([]bool, error) { +func (d *LocalDA) Validate(ctx context.Context, ids []da.ID, proofs []da.Proof, ns []byte) ([]bool, error) { if err := validateNamespace(ns); err != nil { d.logger.Error().Err(err).Msg("Validate: invalid namespace") return nil, err diff --git a/da/cmd/local-da/server.go b/da/cmd/local-da/server.go index 9066f01e72..f4e7b9d338 100644 --- a/da/cmd/local-da/server.go +++ b/da/cmd/local-da/server.go @@ -10,7 +10,7 @@ import ( "github.com/filecoin-project/go-jsonrpc" "github.com/rs/zerolog" - "github.com/evstack/ev-node/core/da" + "github.com/evstack/ev-node/da" ) // Server is a jsonrpc service that serves the LocalDA implementation diff --git a/da/da.go b/da/da.go new file mode 100644 index 0000000000..b1d1b6edb3 --- /dev/null +++ b/da/da.go @@ -0,0 +1,232 @@ +package da + +import ( + "context" + "crypto/sha256" + "encoding/binary" + "encoding/hex" + "errors" + "fmt" + "strings" + "time" +) + +// DA defines the interface for interaction with Data Availability layers. +type DA interface { + // Get returns Blob for each given ID, or an error. + Get(ctx context.Context, ids []ID, namespace []byte) ([]Blob, error) + + // GetIDs returns IDs of all Blobs located in DA at given height. + GetIDs(ctx context.Context, height uint64, namespace []byte) (*GetIDsResult, error) + + // GetProofs returns inclusion Proofs for Blobs specified by their IDs. + GetProofs(ctx context.Context, ids []ID, namespace []byte) ([]Proof, error) + + // Commit creates a Commitment for each given Blob. + Commit(ctx context.Context, blobs []Blob, namespace []byte) ([]Commitment, error) + + // Submit submits the Blobs to Data Availability layer. + Submit(ctx context.Context, blobs []Blob, gasPrice float64, namespace []byte) ([]ID, error) + + // SubmitWithOptions submits the Blobs to Data Availability layer with additional options. + SubmitWithOptions(ctx context.Context, blobs []Blob, gasPrice float64, namespace []byte, options []byte) ([]ID, error) + + // Validate validates Commitments against the corresponding Proofs. + Validate(ctx context.Context, ids []ID, proofs []Proof, namespace []byte) ([]bool, error) +} + +// Blob is the data submitted/received from DA interface. +type Blob = []byte + +// ID should contain serialized data required by the implementation to find blob in Data Availability layer. +type ID = []byte + +// Commitment should contain serialized cryptographic commitment to Blob value. +type Commitment = []byte + +// Proof should contain serialized proof of inclusion (publication) of Blob in Data Availability layer. +type Proof = []byte + +// GetIDsResult holds the result of GetIDs call: IDs and timestamp of corresponding block. +type GetIDsResult struct { + IDs []ID + Timestamp time.Time +} + +// ResultSubmit contains information returned from DA layer after block headers/data submission. +type ResultSubmit struct { + BaseResult +} + +// ResultRetrieve contains batch of block headers returned from DA layer client. +type ResultRetrieve struct { + BaseResult + // Data is the block data retrieved from Data Availability Layer. + Data [][]byte +} + +// StatusCode is a type for DA layer return status. +type StatusCode uint64 + +// Data Availability return codes. +const ( + StatusUnknown StatusCode = iota + StatusSuccess + StatusNotFound + StatusNotIncludedInBlock + StatusAlreadyInMempool + StatusTooBig + StatusContextDeadline + StatusError + StatusIncorrectAccountSequence + StatusContextCanceled + StatusHeightFromFuture +) + +// BaseResult contains basic information returned by DA layer. +type BaseResult struct { + Code StatusCode + Message string + Height uint64 + SubmittedCount uint64 + BlobSize uint64 + IDs [][]byte + Timestamp time.Time +} + +// makeID creates an ID from a height and a commitment. +func makeID(height uint64, commitment []byte) []byte { + id := make([]byte, len(commitment)+8) + binary.LittleEndian.PutUint64(id, height) + copy(id[8:], commitment) + return id +} + +// SplitID splits an ID into a height and a commitment. +func SplitID(id []byte) (uint64, []byte, error) { + if len(id) <= 8 { + return 0, nil, fmt.Errorf("invalid ID length: %d", len(id)) + } + commitment := id[8:] + return binary.LittleEndian.Uint64(id[:8]), commitment, nil +} + +// Errors +var ( + ErrBlobNotFound = errors.New("blob: not found") + ErrBlobSizeOverLimit = errors.New("blob: over size limit") + ErrTxTimedOut = errors.New("timed out waiting for tx to be included in a block") + ErrTxAlreadyInMempool = errors.New("tx already in mempool") + ErrTxIncorrectAccountSequence = errors.New("incorrect account sequence") + ErrContextDeadline = errors.New("context deadline") + ErrHeightFromFuture = errors.New("given height is from the future") + ErrContextCanceled = errors.New("context canceled") +) + +// Namespace constants and types +const ( + // NamespaceVersionIndex is the index of the namespace version in the byte slice + NamespaceVersionIndex = 0 + // NamespaceVersionSize is the size of the namespace version in bytes + NamespaceVersionSize = 1 + // NamespaceIDSize is the size of the namespace ID in bytes + NamespaceIDSize = 28 + // NamespaceSize is the total size of a namespace (version + ID) in bytes + NamespaceSize = NamespaceVersionSize + NamespaceIDSize + + // NamespaceVersionZero is the only supported user-specifiable namespace version + NamespaceVersionZero = uint8(0) + // NamespaceVersionMax is the max namespace version + NamespaceVersionMax = uint8(255) + + // NamespaceVersionZeroPrefixSize is the number of leading zero bytes required for version 0 + NamespaceVersionZeroPrefixSize = 18 + // NamespaceVersionZeroDataSize is the number of data bytes available for version 0 + NamespaceVersionZeroDataSize = 10 +) + +// Namespace represents a Celestia namespace +type Namespace struct { + Version uint8 + ID [NamespaceIDSize]byte +} + +// Bytes returns the namespace as a byte slice +func (n Namespace) Bytes() []byte { + result := make([]byte, NamespaceSize) + result[NamespaceVersionIndex] = n.Version + copy(result[NamespaceVersionSize:], n.ID[:]) + return result +} + +// IsValidForVersion0 checks if the namespace is valid for version 0 +func (n Namespace) IsValidForVersion0() bool { + if n.Version != NamespaceVersionZero { + return false + } + + for i := range NamespaceVersionZeroPrefixSize { + if n.ID[i] != 0 { + return false + } + } + return true +} + +// NewNamespaceV0 creates a new version 0 namespace from the provided data +func NewNamespaceV0(data []byte) (*Namespace, error) { + if len(data) > NamespaceVersionZeroDataSize { + return nil, fmt.Errorf("data too long for version 0 namespace: got %d bytes, max %d", + len(data), NamespaceVersionZeroDataSize) + } + + ns := &Namespace{ + Version: NamespaceVersionZero, + } + + copy(ns.ID[NamespaceVersionZeroPrefixSize:], data) + return ns, nil +} + +// NamespaceFromBytes creates a namespace from a 29-byte slice +func NamespaceFromBytes(b []byte) (*Namespace, error) { + if len(b) != NamespaceSize { + return nil, fmt.Errorf("invalid namespace size: expected %d, got %d", NamespaceSize, len(b)) + } + + ns := &Namespace{ + Version: b[NamespaceVersionIndex], + } + copy(ns.ID[:], b[NamespaceVersionSize:]) + + if ns.Version == NamespaceVersionZero && !ns.IsValidForVersion0() { + return nil, fmt.Errorf("invalid version 0 namespace: first %d bytes of ID must be zero", + NamespaceVersionZeroPrefixSize) + } + + return ns, nil +} + +// NamespaceFromString creates a version 0 namespace from a string identifier +func NamespaceFromString(s string) *Namespace { + hash := sha256.Sum256([]byte(s)) + ns, _ := NewNamespaceV0(hash[:NamespaceVersionZeroDataSize]) + return ns +} + +// HexString returns the hex representation of the namespace +func (n Namespace) HexString() string { + return "0x" + hex.EncodeToString(n.Bytes()) +} + +// ParseHexNamespace parses a hex string into a namespace +func ParseHexNamespace(hexStr string) (*Namespace, error) { + hexStr = strings.TrimPrefix(hexStr, "0x") + + b, err := hex.DecodeString(hexStr) + if err != nil { + return nil, fmt.Errorf("invalid hex string: %w", err) + } + + return NamespaceFromBytes(b) +} diff --git a/core/da/namespace_test.go b/da/da_test.go similarity index 100% rename from core/da/namespace_test.go rename to da/da_test.go diff --git a/da/internal/mocks/da.go b/da/internal/mocks/da.go index 37539d5480..242cdace46 100644 --- a/da/internal/mocks/da.go +++ b/da/internal/mocks/da.go @@ -7,7 +7,7 @@ package mocks import ( "context" - "github.com/evstack/ev-node/core/da" + "github.com/evstack/ev-node/da" mock "github.com/stretchr/testify/mock" ) diff --git a/core/da/dummy.go b/da/testing.go similarity index 100% rename from core/da/dummy.go rename to da/testing.go diff --git a/core/da/dummy_test.go b/da/testing_test.go similarity index 100% rename from core/da/dummy_test.go rename to da/testing_test.go diff --git a/node/full.go b/node/full.go index 2097c24b59..5366b8d7cb 100644 --- a/node/full.go +++ b/node/full.go @@ -18,7 +18,7 @@ import ( "github.com/evstack/ev-node/block" - coreda "github.com/evstack/ev-node/core/da" + da "github.com/evstack/ev-node/da" coreexecutor "github.com/evstack/ev-node/core/execution" coresequencer "github.com/evstack/ev-node/core/sequencer" "github.com/evstack/ev-node/pkg/config" @@ -53,7 +53,7 @@ type FullNode struct { nodeConfig config.Config - da coreda.DA + da da.DA p2pClient *p2p.Client hSyncService *evsync.HeaderSyncService @@ -75,7 +75,7 @@ func newFullNode( database ds.Batching, exec coreexecutor.Executor, sequencer coresequencer.Sequencer, - da coreda.DA, + da da.DA, metricsProvider MetricsProvider, logger zerolog.Logger, nodeOpts NodeOptions, diff --git a/node/helpers_test.go b/node/helpers_test.go index e77744a4ec..e9f20ff3c9 100644 --- a/node/helpers_test.go +++ b/node/helpers_test.go @@ -17,7 +17,7 @@ import ( "github.com/rs/zerolog" "github.com/stretchr/testify/require" - coreda "github.com/evstack/ev-node/core/da" + da "github.com/evstack/ev-node/da" coreexecutor "github.com/evstack/ev-node/core/execution" coresequencer "github.com/evstack/ev-node/core/sequencer" @@ -43,10 +43,10 @@ const ( ) // createTestComponents creates test components for node initialization -func createTestComponents(t *testing.T, config evconfig.Config) (coreexecutor.Executor, coresequencer.Sequencer, coreda.DA, *p2p.Client, datastore.Batching, *key.NodeKey, func()) { +func createTestComponents(t *testing.T, config evconfig.Config) (coreexecutor.Executor, coresequencer.Sequencer, da.DA, *p2p.Client, datastore.Batching, *key.NodeKey, func()) { executor := coreexecutor.NewDummyExecutor() sequencer := coresequencer.NewDummySequencer() - dummyDA := coreda.NewDummyDA(100_000, config.DA.BlockTime.Duration) + dummyDA := da.NewDummyDA(100_000, config.DA.BlockTime.Duration) dummyDA.StartHeightTicker() stopDAHeightTicker := func() { @@ -101,7 +101,7 @@ func newTestNode( config evconfig.Config, executor coreexecutor.Executor, sequencer coresequencer.Sequencer, - dac coreda.DA, + dac da.DA, p2pClient *p2p.Client, ds datastore.Batching, stopDAHeightTicker func(), @@ -145,7 +145,7 @@ func createNodeWithCustomComponents( config evconfig.Config, executor coreexecutor.Executor, sequencer coresequencer.Sequencer, - dac coreda.DA, + dac da.DA, p2pClient *p2p.Client, ds datastore.Batching, stopDAHeightTicker func(), diff --git a/node/node.go b/node/node.go index 4d780035aa..67111d9b51 100644 --- a/node/node.go +++ b/node/node.go @@ -5,7 +5,7 @@ import ( "github.com/rs/zerolog" "github.com/evstack/ev-node/block" - coreda "github.com/evstack/ev-node/core/da" + da "github.com/evstack/ev-node/da" coreexecutor "github.com/evstack/ev-node/core/execution" coresequencer "github.com/evstack/ev-node/core/sequencer" "github.com/evstack/ev-node/pkg/config" @@ -33,7 +33,7 @@ func NewNode( conf config.Config, exec coreexecutor.Executor, sequencer coresequencer.Sequencer, - da coreda.DA, + da da.DA, signer signer.Signer, p2pClient *p2p.Client, genesis genesis.Genesis, diff --git a/node/single_sequencer_integration_test.go b/node/single_sequencer_integration_test.go index 22b2fd4506..c202ec951f 100644 --- a/node/single_sequencer_integration_test.go +++ b/node/single_sequencer_integration_test.go @@ -14,7 +14,7 @@ import ( "github.com/stretchr/testify/require" "github.com/stretchr/testify/suite" - coreda "github.com/evstack/ev-node/core/da" + da "github.com/evstack/ev-node/da" coreexecutor "github.com/evstack/ev-node/core/execution" evconfig "github.com/evstack/ev-node/pkg/config" ) @@ -321,7 +321,7 @@ func TestBatchQueueThrottlingWithDAFailure(t *testing.T) { require.True(ok, "Expected DummyExecutor implementation") // Cast dummyDA to our enhanced version so we can make it fail - dummyDAImpl, ok := dummyDA.(*coreda.DummyDA) + dummyDAImpl, ok := dummyDA.(*da.DummyDA) require.True(ok, "Expected DummyDA implementation") // Create node with components diff --git a/pkg/cmd/run_node.go b/pkg/cmd/run_node.go index e0efe0b652..76ee83edb1 100644 --- a/pkg/cmd/run_node.go +++ b/pkg/cmd/run_node.go @@ -15,7 +15,7 @@ import ( "github.com/rs/zerolog" "github.com/spf13/cobra" - coreda "github.com/evstack/ev-node/core/da" + da "github.com/evstack/ev-node/da" coreexecutor "github.com/evstack/ev-node/core/execution" coresequencer "github.com/evstack/ev-node/core/sequencer" "github.com/evstack/ev-node/node" @@ -81,7 +81,7 @@ func StartNode( cmd *cobra.Command, executor coreexecutor.Executor, sequencer coresequencer.Sequencer, - da coreda.DA, + da da.DA, p2pClient *p2p.Client, datastore datastore.Batching, nodeConfig rollconf.Config, diff --git a/pkg/cmd/run_node_test.go b/pkg/cmd/run_node_test.go index 2980f94741..b433f80757 100644 --- a/pkg/cmd/run_node_test.go +++ b/pkg/cmd/run_node_test.go @@ -13,7 +13,7 @@ import ( "github.com/spf13/cobra" "github.com/stretchr/testify/assert" - coreda "github.com/evstack/ev-node/core/da" + da "github.com/evstack/ev-node/da" coreexecutor "github.com/evstack/ev-node/core/execution" coresequencer "github.com/evstack/ev-node/core/sequencer" "github.com/evstack/ev-node/node" @@ -26,10 +26,10 @@ import ( const MockDANamespace = "test" -func createTestComponents(_ context.Context, t *testing.T) (coreexecutor.Executor, coresequencer.Sequencer, coreda.DA, signer.Signer, *p2p.Client, datastore.Batching, func()) { +func createTestComponents(_ context.Context, t *testing.T) (coreexecutor.Executor, coresequencer.Sequencer, da.DA, signer.Signer, *p2p.Client, datastore.Batching, func()) { executor := coreexecutor.NewDummyExecutor() sequencer := coresequencer.NewDummySequencer() - dummyDA := coreda.NewDummyDA(100_000, 10*time.Second) + dummyDA := da.NewDummyDA(100_000, 10*time.Second) dummyDA.StartHeightTicker() stopDAHeightTicker := func() { dummyDA.StopHeightTicker() @@ -687,7 +687,7 @@ func newRunNodeCmd( ctx context.Context, executor coreexecutor.Executor, sequencer coresequencer.Sequencer, - dac coreda.DA, + dac da.DA, remoteSigner signer.Signer, p2pClient *p2p.Client, datastore datastore.Batching, diff --git a/pkg/config/config.go b/pkg/config/config.go index aad4ce6b93..9cb885322f 100644 --- a/pkg/config/config.go +++ b/pkg/config/config.go @@ -11,7 +11,7 @@ import ( "time" "github.com/celestiaorg/go-square/v3/share" - "github.com/evstack/ev-node/core/da" + "github.com/evstack/ev-node/da" "github.com/mitchellh/mapstructure" "github.com/spf13/cobra" "github.com/spf13/pflag" diff --git a/pkg/rpc/server/da_visualization.go b/pkg/rpc/server/da_visualization.go index ea003c6460..f31830d544 100644 --- a/pkg/rpc/server/da_visualization.go +++ b/pkg/rpc/server/da_visualization.go @@ -11,7 +11,7 @@ import ( "sync" "time" - coreda "github.com/evstack/ev-node/core/da" + da "github.com/evstack/ev-node/da" "github.com/rs/zerolog" ) @@ -33,7 +33,7 @@ type DASubmissionInfo struct { // DAVisualizationServer provides DA layer visualization endpoints type DAVisualizationServer struct { - da coreda.DA + da da.DA logger zerolog.Logger submissions []DASubmissionInfo mutex sync.RWMutex @@ -41,7 +41,7 @@ type DAVisualizationServer struct { } // NewDAVisualizationServer creates a new DA visualization server -func NewDAVisualizationServer(da coreda.DA, logger zerolog.Logger, isAggregator bool) *DAVisualizationServer { +func NewDAVisualizationServer(da da.DA, logger zerolog.Logger, isAggregator bool) *DAVisualizationServer { return &DAVisualizationServer{ da: da, logger: logger, @@ -52,7 +52,7 @@ func NewDAVisualizationServer(da coreda.DA, logger zerolog.Logger, isAggregator // RecordSubmission records a DA submission for visualization // Only keeps the last 100 submissions in memory for the dashboard display -func (s *DAVisualizationServer) RecordSubmission(result *coreda.ResultSubmit, gasPrice float64, numBlobs uint64) { +func (s *DAVisualizationServer) RecordSubmission(result *da.ResultSubmit, gasPrice float64, numBlobs uint64) { s.mutex.Lock() defer s.mutex.Unlock() @@ -83,27 +83,27 @@ func (s *DAVisualizationServer) RecordSubmission(result *coreda.ResultSubmit, ga } // getStatusCodeString converts status code to human-readable string -func (s *DAVisualizationServer) getStatusCodeString(code coreda.StatusCode) string { +func (s *DAVisualizationServer) getStatusCodeString(code da.StatusCode) string { switch code { - case coreda.StatusSuccess: + case da.StatusSuccess: return "Success" - case coreda.StatusNotFound: + case da.StatusNotFound: return "Not Found" - case coreda.StatusNotIncludedInBlock: + case da.StatusNotIncludedInBlock: return "Not Included In Block" - case coreda.StatusAlreadyInMempool: + case da.StatusAlreadyInMempool: return "Already In Mempool" - case coreda.StatusTooBig: + case da.StatusTooBig: return "Too Big" - case coreda.StatusContextDeadline: + case da.StatusContextDeadline: return "Context Deadline" - case coreda.StatusError: + case da.StatusError: return "Error" - case coreda.StatusIncorrectAccountSequence: + case da.StatusIncorrectAccountSequence: return "Incorrect Account Sequence" - case coreda.StatusContextCanceled: + case da.StatusContextCanceled: return "Context Canceled" - case coreda.StatusHeightFromFuture: + case da.StatusHeightFromFuture: return "Height From Future" default: return "Unknown" @@ -173,7 +173,7 @@ func (s *DAVisualizationServer) handleDABlobDetails(w http.ResponseWriter, r *ht // Extract namespace - using empty namespace for now, could be parameterized namespace := []byte{} - blobs, err := s.da.Get(ctx, []coreda.ID{id}, namespace) + blobs, err := s.da.Get(ctx, []da.ID{id}, namespace) if err != nil { s.logger.Error().Err(err).Str("blob_id", blobID).Msg("Failed to retrieve blob from DA") http.Error(w, fmt.Sprintf("Failed to retrieve blob: %v", err), http.StatusInternalServerError) @@ -186,7 +186,7 @@ func (s *DAVisualizationServer) handleDABlobDetails(w http.ResponseWriter, r *ht } // Parse the blob ID to extract height and commitment - height, commitment, err := coreda.SplitID(id) + height, commitment, err := da.SplitID(id) if err != nil { s.logger.Error().Err(err).Str("blob_id", blobID).Msg("Failed to split blob ID") } diff --git a/pkg/rpc/server/da_visualization_test.go b/pkg/rpc/server/da_visualization_test.go index 80b9a1408c..1a5f46eae0 100644 --- a/pkg/rpc/server/da_visualization_test.go +++ b/pkg/rpc/server/da_visualization_test.go @@ -9,7 +9,7 @@ import ( "testing" "time" - coreda "github.com/evstack/ev-node/core/da" + da "github.com/evstack/ev-node/da" "github.com/evstack/ev-node/pkg/config" "github.com/evstack/ev-node/test/mocks" "github.com/rs/zerolog" @@ -34,9 +34,9 @@ func TestRecordSubmission(t *testing.T) { server := NewDAVisualizationServer(da, logger, true) // Test recording a successful submission - result := &coreda.ResultSubmit{ - BaseResult: coreda.BaseResult{ - Code: coreda.StatusSuccess, + result := &da.ResultSubmit{ + BaseResult: da.BaseResult{ + Code: da.StatusSuccess, Height: 100, BlobSize: 1024, Timestamp: time.Now(), @@ -67,9 +67,9 @@ func TestRecordSubmissionMemoryLimit(t *testing.T) { // Add 101 submissions (more than the limit of 100) for i := 0; i < 101; i++ { - result := &coreda.ResultSubmit{ - BaseResult: coreda.BaseResult{ - Code: coreda.StatusSuccess, + result := &da.ResultSubmit{ + BaseResult: da.BaseResult{ + Code: da.StatusSuccess, Height: uint64(i), BlobSize: uint64(i * 10), Timestamp: time.Now(), @@ -92,15 +92,15 @@ func TestGetStatusCodeString(t *testing.T) { server := NewDAVisualizationServer(da, logger, true) tests := []struct { - code coreda.StatusCode + code da.StatusCode expected string }{ - {coreda.StatusSuccess, "Success"}, - {coreda.StatusNotFound, "Not Found"}, - {coreda.StatusError, "Error"}, - {coreda.StatusTooBig, "Too Big"}, - {coreda.StatusContextDeadline, "Context Deadline"}, - {coreda.StatusUnknown, "Unknown"}, + {da.StatusSuccess, "Success"}, + {da.StatusNotFound, "Not Found"}, + {da.StatusError, "Error"}, + {da.StatusTooBig, "Too Big"}, + {da.StatusContextDeadline, "Context Deadline"}, + {da.StatusUnknown, "Unknown"}, } for _, tt := range tests { @@ -115,9 +115,9 @@ func TestHandleDASubmissions(t *testing.T) { server := NewDAVisualizationServer(da, logger, true) // Add a test submission - result := &coreda.ResultSubmit{ - BaseResult: coreda.BaseResult{ - Code: coreda.StatusSuccess, + result := &da.ResultSubmit{ + BaseResult: da.BaseResult{ + Code: da.StatusSuccess, Height: 100, BlobSize: 1024, Timestamp: time.Now(), @@ -188,9 +188,9 @@ func TestHandleDAVisualizationHTML(t *testing.T) { server := NewDAVisualizationServer(da, logger, true) // Add a test submission - result := &coreda.ResultSubmit{ - BaseResult: coreda.BaseResult{ - Code: coreda.StatusSuccess, + result := &da.ResultSubmit{ + BaseResult: da.BaseResult{ + Code: da.StatusSuccess, Height: 100, BlobSize: 1024, Timestamp: time.Now(), @@ -239,9 +239,9 @@ func TestRegisterCustomHTTPEndpointsDAVisualization(t *testing.T) { server := NewDAVisualizationServer(da, logger, true) // Add test submission - result := &coreda.ResultSubmit{ - BaseResult: coreda.BaseResult{ - Code: coreda.StatusSuccess, + result := &da.ResultSubmit{ + BaseResult: da.BaseResult{ + Code: da.StatusSuccess, Height: 100, BlobSize: 1024, Timestamp: time.Now(), diff --git a/pkg/rpc/server/server.go b/pkg/rpc/server/server.go index f649fda37d..5b1758d4bd 100644 --- a/pkg/rpc/server/server.go +++ b/pkg/rpc/server/server.go @@ -12,7 +12,7 @@ import ( "connectrpc.com/connect" "connectrpc.com/grpcreflect" - coreda "github.com/evstack/ev-node/core/da" + da "github.com/evstack/ev-node/da" ds "github.com/ipfs/go-datastore" "github.com/rs/zerolog" "golang.org/x/net/http2" @@ -206,8 +206,8 @@ func (cs *ConfigServer) GetNamespace( req *connect.Request[emptypb.Empty], ) (*connect.Response[pb.GetNamespaceResponse], error) { - hns := coreda.NamespaceFromString(cs.config.DA.GetNamespace()) - dns := coreda.NamespaceFromString(cs.config.DA.GetDataNamespace()) + hns := da.NamespaceFromString(cs.config.DA.GetNamespace()) + dns := da.NamespaceFromString(cs.config.DA.GetDataNamespace()) return connect.NewResponse(&pb.GetNamespaceResponse{ HeaderNamespace: hns.HexString(), diff --git a/sequencers/single/sequencer.go b/sequencers/single/sequencer.go index dbc5bc567c..c8db9708c1 100644 --- a/sequencers/single/sequencer.go +++ b/sequencers/single/sequencer.go @@ -10,7 +10,7 @@ import ( ds "github.com/ipfs/go-datastore" "github.com/rs/zerolog" - coreda "github.com/evstack/ev-node/core/da" + da "github.com/evstack/ev-node/da" coresequencer "github.com/evstack/ev-node/core/sequencer" ) @@ -28,7 +28,7 @@ type Sequencer struct { proposer bool Id []byte - da coreda.DA + da da.DA batchTime time.Duration @@ -42,7 +42,7 @@ func NewSequencer( ctx context.Context, logger zerolog.Logger, db ds.Batching, - da coreda.DA, + da da.DA, id []byte, batchTime time.Duration, metrics *Metrics, @@ -56,7 +56,7 @@ func NewSequencerWithQueueSize( ctx context.Context, logger zerolog.Logger, db ds.Batching, - da coreda.DA, + da da.DA, id []byte, batchTime time.Duration, metrics *Metrics, @@ -130,7 +130,7 @@ func (c *Sequencer) GetNextBatch(ctx context.Context, req coresequencer.GetNextB // RecordMetrics updates the metrics with the given values. // This method is intended to be called by the block manager after submitting data to the DA layer. -func (c *Sequencer) RecordMetrics(gasPrice float64, blobSize uint64, statusCode coreda.StatusCode, numPendingBlocks uint64, includedBlockHeight uint64) { +func (c *Sequencer) RecordMetrics(gasPrice float64, blobSize uint64, statusCode da.StatusCode, numPendingBlocks uint64, includedBlockHeight uint64) { if c.metrics != nil { c.metrics.GasPrice.Set(gasPrice) c.metrics.LastBlobSize.Set(float64(blobSize)) diff --git a/sequencers/single/sequencer_test.go b/sequencers/single/sequencer_test.go index 3904a62f73..f31cd6af79 100644 --- a/sequencers/single/sequencer_test.go +++ b/sequencers/single/sequencer_test.go @@ -13,14 +13,14 @@ import ( "github.com/stretchr/testify/mock" "github.com/stretchr/testify/require" - coreda "github.com/evstack/ev-node/core/da" + da "github.com/evstack/ev-node/da" coresequencer "github.com/evstack/ev-node/core/sequencer" damocks "github.com/evstack/ev-node/test/mocks" ) func TestNewSequencer(t *testing.T) { // Create a new sequencer with mock DA client - dummyDA := coreda.NewDummyDA(100_000_000, 10*time.Second) + dummyDA := da.NewDummyDA(100_000_000, 10*time.Second) metrics, _ := NopMetrics() db := ds.NewMapDatastore() ctx, cancel := context.WithTimeout(context.Background(), 3*time.Second) @@ -53,7 +53,7 @@ func TestNewSequencer(t *testing.T) { func TestSequencer_SubmitBatchTxs(t *testing.T) { // Initialize a new sequencer metrics, _ := NopMetrics() - dummyDA := coreda.NewDummyDA(100_000_000, 10*time.Second) + dummyDA := da.NewDummyDA(100_000_000, 10*time.Second) db := ds.NewMapDatastore() ctx, cancel := context.WithTimeout(context.Background(), 3*time.Second) defer cancel() @@ -106,7 +106,7 @@ func TestSequencer_SubmitBatchTxs(t *testing.T) { func TestSequencer_SubmitBatchTxs_EmptyBatch(t *testing.T) { // Initialize a new sequencer metrics, _ := NopMetrics() - dummyDA := coreda.NewDummyDA(100_000_000, 10*time.Second) + dummyDA := da.NewDummyDA(100_000_000, 10*time.Second) db := ds.NewMapDatastore() ctx, cancel := context.WithTimeout(context.Background(), 3*time.Second) defer cancel() @@ -447,7 +447,7 @@ func TestSequencer_RecordMetrics(t *testing.T) { // Test values gasPrice := 1.5 blobSize := uint64(1024) - statusCode := coreda.StatusSuccess + statusCode := da.StatusSuccess numPendingBlocks := uint64(5) includedBlockHeight := uint64(100) @@ -470,7 +470,7 @@ func TestSequencer_RecordMetrics(t *testing.T) { // Test values gasPrice := 2.0 blobSize := uint64(2048) - statusCode := coreda.StatusNotIncludedInBlock + statusCode := da.StatusNotIncludedInBlock numPendingBlocks := uint64(3) includedBlockHeight := uint64(200) @@ -495,13 +495,13 @@ func TestSequencer_RecordMetrics(t *testing.T) { // Test different status codes testCases := []struct { name string - statusCode coreda.StatusCode + statusCode da.StatusCode }{ - {"Success", coreda.StatusSuccess}, - {"NotIncluded", coreda.StatusNotIncludedInBlock}, - {"AlreadyInMempool", coreda.StatusAlreadyInMempool}, - {"TooBig", coreda.StatusTooBig}, - {"ContextCanceled", coreda.StatusContextCanceled}, + {"Success", da.StatusSuccess}, + {"NotIncluded", da.StatusNotIncludedInBlock}, + {"AlreadyInMempool", da.StatusAlreadyInMempool}, + {"TooBig", da.StatusTooBig}, + {"ContextCanceled", da.StatusContextCanceled}, } for _, tc := range testCases { @@ -634,7 +634,7 @@ func TestSequencer_DAFailureAndQueueThrottling_Integration(t *testing.T) { defer db.Close() // Create a dummy DA that we can make fail - dummyDA := coreda.NewDummyDA(100_000, 100*time.Millisecond) + dummyDA := da.NewDummyDA(100_000, 100*time.Millisecond) dummyDA.StartHeightTicker() defer dummyDA.StopHeightTicker() diff --git a/test/mocks/da.go b/test/mocks/da.go index 37539d5480..242cdace46 100644 --- a/test/mocks/da.go +++ b/test/mocks/da.go @@ -7,7 +7,7 @@ package mocks import ( "context" - "github.com/evstack/ev-node/core/da" + "github.com/evstack/ev-node/da" mock "github.com/stretchr/testify/mock" ) diff --git a/types/da.go b/types/da.go index e0d58710d9..2d3c1ba161 100644 --- a/types/da.go +++ b/types/da.go @@ -9,7 +9,7 @@ import ( "github.com/rs/zerolog" - coreda "github.com/evstack/ev-node/core/da" + da "github.com/evstack/ev-node/da" ) // SubmitWithHelpers performs blob submission using the underlying DA layer, @@ -18,14 +18,14 @@ import ( // It mimics the logic previously found in da.DAClient.Submit. func SubmitWithHelpers( ctx context.Context, - da coreda.DA, // Use the core DA interface + daLayer da.DA, logger zerolog.Logger, data [][]byte, gasPrice float64, namespace []byte, options []byte, -) coreda.ResultSubmit { // Return core ResultSubmit type - ids, err := da.SubmitWithOptions(ctx, data, gasPrice, namespace, options) +) da.ResultSubmit { + ids, err := daLayer.SubmitWithOptions(ctx, data, gasPrice, namespace, options) // calculate blob size var blobSize uint64 @@ -37,37 +37,37 @@ func SubmitWithHelpers( if err != nil { if errors.Is(err, context.Canceled) { logger.Debug().Msg("DA submission canceled via helper due to context cancellation") - return coreda.ResultSubmit{ - BaseResult: coreda.BaseResult{ - Code: coreda.StatusContextCanceled, + return da.ResultSubmit{ + BaseResult: da.BaseResult{ + Code: da.StatusContextCanceled, Message: "submission canceled", IDs: ids, BlobSize: blobSize, }, } } - status := coreda.StatusError + status := da.StatusError switch { - case errors.Is(err, coreda.ErrTxTimedOut): - status = coreda.StatusNotIncludedInBlock - case errors.Is(err, coreda.ErrTxAlreadyInMempool): - status = coreda.StatusAlreadyInMempool - case errors.Is(err, coreda.ErrTxIncorrectAccountSequence): - status = coreda.StatusIncorrectAccountSequence - case errors.Is(err, coreda.ErrBlobSizeOverLimit): - status = coreda.StatusTooBig - case errors.Is(err, coreda.ErrContextDeadline): - status = coreda.StatusContextDeadline + case errors.Is(err, da.ErrTxTimedOut): + status = da.StatusNotIncludedInBlock + case errors.Is(err, da.ErrTxAlreadyInMempool): + status = da.StatusAlreadyInMempool + case errors.Is(err, da.ErrTxIncorrectAccountSequence): + status = da.StatusIncorrectAccountSequence + case errors.Is(err, da.ErrBlobSizeOverLimit): + status = da.StatusTooBig + case errors.Is(err, da.ErrContextDeadline): + status = da.StatusContextDeadline } // Use debug level for StatusTooBig as it gets handled later in submitToDA through recursive splitting - if status == coreda.StatusTooBig { + if status == da.StatusTooBig { logger.Debug().Err(err).Uint64("status", uint64(status)).Msg("DA submission failed via helper") } else { logger.Error().Err(err).Uint64("status", uint64(status)).Msg("DA submission failed via helper") } - return coreda.ResultSubmit{ - BaseResult: coreda.BaseResult{ + return da.ResultSubmit{ + BaseResult: da.BaseResult{ Code: status, Message: "failed to submit blobs: " + err.Error(), IDs: ids, @@ -81,9 +81,9 @@ func SubmitWithHelpers( if len(ids) == 0 && len(data) > 0 { logger.Warn().Msg("DA submission via helper returned no IDs for non-empty input data") - return coreda.ResultSubmit{ - BaseResult: coreda.BaseResult{ - Code: coreda.StatusError, + return da.ResultSubmit{ + BaseResult: da.BaseResult{ + Code: da.StatusError, Message: "failed to submit blobs: no IDs returned despite non-empty input", }, } @@ -92,16 +92,16 @@ func SubmitWithHelpers( // Get height from the first ID var height uint64 if len(ids) > 0 { - height, _, err = coreda.SplitID(ids[0]) + height, _, err = da.SplitID(ids[0]) if err != nil { logger.Error().Err(err).Msg("failed to split ID") } } logger.Debug().Int("num_ids", len(ids)).Msg("DA submission successful via helper") - return coreda.ResultSubmit{ - BaseResult: coreda.BaseResult{ - Code: coreda.StatusSuccess, + return da.ResultSubmit{ + BaseResult: da.BaseResult{ + Code: da.StatusSuccess, IDs: ids, SubmittedCount: uint64(len(ids)), Height: height, @@ -117,35 +117,35 @@ func SubmitWithHelpers( // requestTimeout defines the timeout for the each retrieval request. func RetrieveWithHelpers( ctx context.Context, - da coreda.DA, + daLayer da.DA, logger zerolog.Logger, dataLayerHeight uint64, namespace []byte, requestTimeout time.Duration, -) coreda.ResultRetrieve { +) da.ResultRetrieve { // 1. Get IDs getIDsCtx, cancel := context.WithTimeout(ctx, requestTimeout) defer cancel() - idsResult, err := da.GetIDs(getIDsCtx, dataLayerHeight, namespace) + idsResult, err := daLayer.GetIDs(getIDsCtx, dataLayerHeight, namespace) if err != nil { // Handle specific "not found" error - if strings.Contains(err.Error(), coreda.ErrBlobNotFound.Error()) { + if strings.Contains(err.Error(), da.ErrBlobNotFound.Error()) { logger.Debug().Uint64("height", dataLayerHeight).Msg("Retrieve helper: Blobs not found at height") - return coreda.ResultRetrieve{ - BaseResult: coreda.BaseResult{ - Code: coreda.StatusNotFound, - Message: coreda.ErrBlobNotFound.Error(), + return da.ResultRetrieve{ + BaseResult: da.BaseResult{ + Code: da.StatusNotFound, + Message: da.ErrBlobNotFound.Error(), Height: dataLayerHeight, Timestamp: time.Now(), }, } } - if strings.Contains(err.Error(), coreda.ErrHeightFromFuture.Error()) { + if strings.Contains(err.Error(), da.ErrHeightFromFuture.Error()) { logger.Debug().Uint64("height", dataLayerHeight).Msg("Retrieve helper: Blobs not found at height") - return coreda.ResultRetrieve{ - BaseResult: coreda.BaseResult{ - Code: coreda.StatusHeightFromFuture, - Message: coreda.ErrHeightFromFuture.Error(), + return da.ResultRetrieve{ + BaseResult: da.BaseResult{ + Code: da.StatusHeightFromFuture, + Message: da.ErrHeightFromFuture.Error(), Height: dataLayerHeight, Timestamp: time.Now(), }, @@ -153,9 +153,9 @@ func RetrieveWithHelpers( } // Handle other errors during GetIDs logger.Error().Uint64("height", dataLayerHeight).Err(err).Msg("Retrieve helper: Failed to get IDs") - return coreda.ResultRetrieve{ - BaseResult: coreda.BaseResult{ - Code: coreda.StatusError, + return da.ResultRetrieve{ + BaseResult: da.BaseResult{ + Code: da.StatusError, Message: fmt.Sprintf("failed to get IDs: %s", err.Error()), Height: dataLayerHeight, Timestamp: time.Now(), @@ -166,10 +166,10 @@ func RetrieveWithHelpers( // This check should technically be redundant if GetIDs correctly returns ErrBlobNotFound if idsResult == nil || len(idsResult.IDs) == 0 { logger.Debug().Uint64("height", dataLayerHeight).Msg("Retrieve helper: No IDs found at height") - return coreda.ResultRetrieve{ - BaseResult: coreda.BaseResult{ - Code: coreda.StatusNotFound, - Message: coreda.ErrBlobNotFound.Error(), + return da.ResultRetrieve{ + BaseResult: da.BaseResult{ + Code: da.StatusNotFound, + Message: da.ErrBlobNotFound.Error(), Height: dataLayerHeight, Timestamp: time.Now(), }, @@ -182,14 +182,14 @@ func RetrieveWithHelpers( end := min(i+batchSize, len(idsResult.IDs)) getBlobsCtx, cancel := context.WithTimeout(ctx, requestTimeout) - batchBlobs, err := da.Get(getBlobsCtx, idsResult.IDs[i:end], namespace) + batchBlobs, err := daLayer.Get(getBlobsCtx, idsResult.IDs[i:end], namespace) cancel() if err != nil { // Handle errors during Get logger.Error().Uint64("height", dataLayerHeight).Int("num_ids", len(idsResult.IDs)).Err(err).Msg("Retrieve helper: Failed to get blobs") - return coreda.ResultRetrieve{ - BaseResult: coreda.BaseResult{ - Code: coreda.StatusError, + return da.ResultRetrieve{ + BaseResult: da.BaseResult{ + Code: da.StatusError, Message: fmt.Sprintf("failed to get blobs for batch %d-%d: %s", i, end-1, err.Error()), Height: dataLayerHeight, Timestamp: time.Now(), @@ -200,9 +200,9 @@ func RetrieveWithHelpers( } // Success logger.Debug().Uint64("height", dataLayerHeight).Int("num_blobs", len(blobs)).Msg("Retrieve helper: Successfully retrieved blobs") - return coreda.ResultRetrieve{ - BaseResult: coreda.BaseResult{ - Code: coreda.StatusSuccess, + return da.ResultRetrieve{ + BaseResult: da.BaseResult{ + Code: da.StatusSuccess, Height: dataLayerHeight, IDs: idsResult.IDs, Timestamp: idsResult.Timestamp, diff --git a/types/da_test.go b/types/da_test.go index 4a111499dc..d26cd1f2d6 100644 --- a/types/da_test.go +++ b/types/da_test.go @@ -10,7 +10,7 @@ import ( "github.com/stretchr/testify/assert" "github.com/stretchr/testify/mock" - coreda "github.com/evstack/ev-node/core/da" + da "github.com/evstack/ev-node/da" "github.com/evstack/ev-node/test/mocks" "github.com/evstack/ev-node/types" ) @@ -25,7 +25,7 @@ func TestSubmitWithHelpers(t *testing.T) { options []byte submitErr error submitIDs [][]byte - expectedCode coreda.StatusCode + expectedCode da.StatusCode expectedErrMsg string expectedIDs [][]byte expectedCount uint64 @@ -36,7 +36,7 @@ func TestSubmitWithHelpers(t *testing.T) { gasPrice: 1.0, options: []byte("opts"), submitIDs: [][]byte{[]byte("id1"), []byte("id2")}, - expectedCode: coreda.StatusSuccess, + expectedCode: da.StatusSuccess, expectedIDs: [][]byte{[]byte("id1"), []byte("id2")}, expectedCount: 2, }, @@ -46,7 +46,7 @@ func TestSubmitWithHelpers(t *testing.T) { gasPrice: 1.0, options: []byte("opts"), submitErr: context.Canceled, - expectedCode: coreda.StatusContextCanceled, + expectedCode: da.StatusContextCanceled, expectedErrMsg: "submission canceled", }, { @@ -54,45 +54,45 @@ func TestSubmitWithHelpers(t *testing.T) { data: [][]byte{[]byte("blob1")}, gasPrice: 1.0, options: []byte("opts"), - submitErr: coreda.ErrTxTimedOut, - expectedCode: coreda.StatusNotIncludedInBlock, - expectedErrMsg: "failed to submit blobs: " + coreda.ErrTxTimedOut.Error(), + submitErr: da.ErrTxTimedOut, + expectedCode: da.StatusNotIncludedInBlock, + expectedErrMsg: "failed to submit blobs: " + da.ErrTxTimedOut.Error(), }, { name: "tx already in mempool error", data: [][]byte{[]byte("blob1")}, gasPrice: 1.0, options: []byte("opts"), - submitErr: coreda.ErrTxAlreadyInMempool, - expectedCode: coreda.StatusAlreadyInMempool, - expectedErrMsg: "failed to submit blobs: " + coreda.ErrTxAlreadyInMempool.Error(), + submitErr: da.ErrTxAlreadyInMempool, + expectedCode: da.StatusAlreadyInMempool, + expectedErrMsg: "failed to submit blobs: " + da.ErrTxAlreadyInMempool.Error(), }, { name: "incorrect account sequence error", data: [][]byte{[]byte("blob1")}, gasPrice: 1.0, options: []byte("opts"), - submitErr: coreda.ErrTxIncorrectAccountSequence, - expectedCode: coreda.StatusIncorrectAccountSequence, - expectedErrMsg: "failed to submit blobs: " + coreda.ErrTxIncorrectAccountSequence.Error(), + submitErr: da.ErrTxIncorrectAccountSequence, + expectedCode: da.StatusIncorrectAccountSequence, + expectedErrMsg: "failed to submit blobs: " + da.ErrTxIncorrectAccountSequence.Error(), }, { name: "blob size over limit error", data: [][]byte{[]byte("blob1")}, gasPrice: 1.0, options: []byte("opts"), - submitErr: coreda.ErrBlobSizeOverLimit, - expectedCode: coreda.StatusTooBig, - expectedErrMsg: "failed to submit blobs: " + coreda.ErrBlobSizeOverLimit.Error(), + submitErr: da.ErrBlobSizeOverLimit, + expectedCode: da.StatusTooBig, + expectedErrMsg: "failed to submit blobs: " + da.ErrBlobSizeOverLimit.Error(), }, { name: "context deadline error", data: [][]byte{[]byte("blob1")}, gasPrice: 1.0, options: []byte("opts"), - submitErr: coreda.ErrContextDeadline, - expectedCode: coreda.StatusContextDeadline, - expectedErrMsg: "failed to submit blobs: " + coreda.ErrContextDeadline.Error(), + submitErr: da.ErrContextDeadline, + expectedCode: da.StatusContextDeadline, + expectedErrMsg: "failed to submit blobs: " + da.ErrContextDeadline.Error(), }, { name: "generic submission error", @@ -100,7 +100,7 @@ func TestSubmitWithHelpers(t *testing.T) { gasPrice: 1.0, options: []byte("opts"), submitErr: errors.New("some generic error"), - expectedCode: coreda.StatusError, + expectedCode: da.StatusError, expectedErrMsg: "failed to submit blobs: some generic error", }, { @@ -109,7 +109,7 @@ func TestSubmitWithHelpers(t *testing.T) { gasPrice: 1.0, options: []byte("opts"), submitIDs: [][]byte{}, - expectedCode: coreda.StatusError, + expectedCode: da.StatusError, expectedErrMsg: "failed to submit blobs: no IDs returned despite non-empty input", }, } @@ -117,7 +117,7 @@ func TestSubmitWithHelpers(t *testing.T) { for _, tc := range testCases { t.Run(tc.name, func(t *testing.T) { mockDA := mocks.NewMockDA(t) - encodedNamespace := coreda.NamespaceFromString("test-namespace") + encodedNamespace := da.NamespaceFromString("test-namespace") mockDA.On("SubmitWithOptions", mock.Anything, tc.data, tc.gasPrice, encodedNamespace.Bytes(), tc.options).Return(tc.submitIDs, tc.submitErr) @@ -147,10 +147,10 @@ func TestRetrieveWithHelpers(t *testing.T) { testCases := []struct { name string - getIDsResult *coreda.GetIDsResult + getIDsResult *da.GetIDsResult getIDsErr error getBlobsErr error - expectedCode coreda.StatusCode + expectedCode da.StatusCode expectedErrMsg string expectedIDs [][]byte expectedData [][]byte @@ -158,61 +158,61 @@ func TestRetrieveWithHelpers(t *testing.T) { }{ { name: "successful retrieval", - getIDsResult: &coreda.GetIDsResult{ + getIDsResult: &da.GetIDsResult{ IDs: mockIDs, Timestamp: mockTimestamp, }, - expectedCode: coreda.StatusSuccess, + expectedCode: da.StatusSuccess, expectedIDs: mockIDs, expectedData: mockBlobs, expectedHeight: dataLayerHeight, }, { name: "blob not found error during GetIDs", - getIDsErr: coreda.ErrBlobNotFound, - expectedCode: coreda.StatusNotFound, - expectedErrMsg: coreda.ErrBlobNotFound.Error(), + getIDsErr: da.ErrBlobNotFound, + expectedCode: da.StatusNotFound, + expectedErrMsg: da.ErrBlobNotFound.Error(), expectedHeight: dataLayerHeight, }, { name: "height from future error during GetIDs", - getIDsErr: coreda.ErrHeightFromFuture, - expectedCode: coreda.StatusHeightFromFuture, - expectedErrMsg: coreda.ErrHeightFromFuture.Error(), + getIDsErr: da.ErrHeightFromFuture, + expectedCode: da.StatusHeightFromFuture, + expectedErrMsg: da.ErrHeightFromFuture.Error(), expectedHeight: dataLayerHeight, }, { name: "generic error during GetIDs", getIDsErr: errors.New("failed to connect to DA"), - expectedCode: coreda.StatusError, + expectedCode: da.StatusError, expectedErrMsg: "failed to get IDs: failed to connect to DA", expectedHeight: dataLayerHeight, }, { name: "GetIDs returns nil result", getIDsResult: nil, - expectedCode: coreda.StatusNotFound, - expectedErrMsg: coreda.ErrBlobNotFound.Error(), + expectedCode: da.StatusNotFound, + expectedErrMsg: da.ErrBlobNotFound.Error(), expectedHeight: dataLayerHeight, }, { name: "GetIDs returns empty IDs", - getIDsResult: &coreda.GetIDsResult{ + getIDsResult: &da.GetIDsResult{ IDs: [][]byte{}, Timestamp: mockTimestamp, }, - expectedCode: coreda.StatusNotFound, - expectedErrMsg: coreda.ErrBlobNotFound.Error(), + expectedCode: da.StatusNotFound, + expectedErrMsg: da.ErrBlobNotFound.Error(), expectedHeight: dataLayerHeight, }, { name: "error during Get (blobs retrieval)", - getIDsResult: &coreda.GetIDsResult{ + getIDsResult: &da.GetIDsResult{ IDs: mockIDs, Timestamp: mockTimestamp, }, getBlobsErr: errors.New("network error during blob retrieval"), - expectedCode: coreda.StatusError, + expectedCode: da.StatusError, expectedErrMsg: "failed to get blobs for batch 0-1: network error during blob retrieval", expectedHeight: dataLayerHeight, }, @@ -221,7 +221,7 @@ func TestRetrieveWithHelpers(t *testing.T) { for _, tc := range testCases { t.Run(tc.name, func(t *testing.T) { mockDA := mocks.NewMockDA(t) - encodedNamespace := coreda.NamespaceFromString("test-namespace") + encodedNamespace := da.NamespaceFromString("test-namespace") mockDA.On("GetIDs", mock.Anything, dataLayerHeight, mock.Anything).Return(tc.getIDsResult, tc.getIDsErr) @@ -250,7 +250,7 @@ func TestRetrieveWithHelpers(t *testing.T) { func TestRetrieveWithHelpers_Timeout(t *testing.T) { logger := zerolog.Nop() dataLayerHeight := uint64(100) - encodedNamespace := coreda.NamespaceFromString("test-namespace") + encodedNamespace := da.NamespaceFromString("test-namespace") t.Run("timeout during GetIDs", func(t *testing.T) { mockDA := mocks.NewMockDA(t) @@ -264,7 +264,7 @@ func TestRetrieveWithHelpers_Timeout(t *testing.T) { // Use a very short timeout to ensure it triggers result := types.RetrieveWithHelpers(context.Background(), mockDA, logger, dataLayerHeight, encodedNamespace.Bytes(), 1*time.Millisecond) - assert.Equal(t, coreda.StatusError, result.Code) + assert.Equal(t, da.StatusError, result.Code) assert.Contains(t, result.Message, "failed to get IDs") assert.Contains(t, result.Message, "context deadline exceeded") mockDA.AssertExpectations(t) @@ -276,7 +276,7 @@ func TestRetrieveWithHelpers_Timeout(t *testing.T) { mockTimestamp := time.Now() // Mock GetIDs to succeed - mockDA.On("GetIDs", mock.Anything, dataLayerHeight, mock.Anything).Return(&coreda.GetIDsResult{ + mockDA.On("GetIDs", mock.Anything, dataLayerHeight, mock.Anything).Return(&da.GetIDsResult{ IDs: mockIDs, Timestamp: mockTimestamp, }, nil) @@ -290,7 +290,7 @@ func TestRetrieveWithHelpers_Timeout(t *testing.T) { // Use a very short timeout to ensure it triggers result := types.RetrieveWithHelpers(context.Background(), mockDA, logger, dataLayerHeight, encodedNamespace.Bytes(), 1*time.Millisecond) - assert.Equal(t, coreda.StatusError, result.Code) + assert.Equal(t, da.StatusError, result.Code) assert.Contains(t, result.Message, "failed to get blobs for batch") assert.Contains(t, result.Message, "context deadline exceeded") mockDA.AssertExpectations(t) From ed2f787838081fb1d569ac062fe5857ab0763cc5 Mon Sep 17 00:00:00 2001 From: Randy Grok Date: Mon, 24 Nov 2025 13:38:45 +0100 Subject: [PATCH 09/35] refactor: remove Adapter, Client implements DA interface directly - Move all DA interface methods (Submit, Get, GetIDs, GetProofs, Commit, Validate) directly into Client - Rename internal Celestia blob API methods to lowercase (submit, get, getAll, getProof, included) - Update all applications to use celestiada.NewClient instead of celestiada.NewAdapter - Remove adapter.go and adapter_test.go (397 lines) - Update client tests to call lowercase internal methods --- apps/evm/single/cmd/run.go | 8 +- apps/grpc/single/cmd/run.go | 8 +- apps/testapp/cmd/run.go | 8 +- da/celestia/adapter.go | 255 ------------------------------------ da/celestia/adapter_test.go | 142 -------------------- da/celestia/client.go | 240 +++++++++++++++++++++++++++++++-- da/celestia/client_test.go | 10 +- 7 files changed, 245 insertions(+), 426 deletions(-) delete mode 100644 da/celestia/adapter.go delete mode 100644 da/celestia/adapter_test.go diff --git a/apps/evm/single/cmd/run.go b/apps/evm/single/cmd/run.go index 0c2f2be85d..1752661243 100644 --- a/apps/evm/single/cmd/run.go +++ b/apps/evm/single/cmd/run.go @@ -54,11 +54,11 @@ var RunCmd = &cobra.Command{ logger.Info().Str("headerNamespace", headerNamespace.HexString()).Str("dataNamespace", dataNamespace.HexString()).Msg("namespaces") ctx := context.Background() - daAdapter, err := celestiada.NewAdapter(ctx, logger, nodeConfig.DA.Address, nodeConfig.DA.AuthToken, rollcmd.DefaultMaxBlobSize) + daClient, err := celestiada.NewClient(ctx, logger, nodeConfig.DA.Address, nodeConfig.DA.AuthToken, rollcmd.DefaultMaxBlobSize) if err != nil { return err } - defer daAdapter.Close() + defer daClient.Close() datastore, err := store.NewDefaultKVStore(nodeConfig.RootDir, nodeConfig.DBPath, "evm-single") if err != nil { @@ -84,7 +84,7 @@ var RunCmd = &cobra.Command{ ctx, logger, datastore, - daAdapter, + daClient, []byte(genesis.ChainID), nodeConfig.Node.BlockTime.Duration, singleMetrics, @@ -104,7 +104,7 @@ var RunCmd = &cobra.Command{ return err } - return rollcmd.StartNode(logger, cmd, executor, sequencer, daAdapter, p2pClient, datastore, nodeConfig, genesis, node.NodeOptions{}) + return rollcmd.StartNode(logger, cmd, executor, sequencer, daClient, p2pClient, datastore, nodeConfig, genesis, node.NodeOptions{}) }, } diff --git a/apps/grpc/single/cmd/run.go b/apps/grpc/single/cmd/run.go index 7953382f36..a262c6449b 100644 --- a/apps/grpc/single/cmd/run.go +++ b/apps/grpc/single/cmd/run.go @@ -52,11 +52,11 @@ The execution client must implement the Evolve execution gRPC interface.`, logger.Info().Str("headerNamespace", headerNamespace.HexString()).Str("dataNamespace", dataNamespace.HexString()).Msg("namespaces") // Create DA client - daAdapter, err := celestiada.NewAdapter(cmd.Context(), logger, nodeConfig.DA.Address, nodeConfig.DA.AuthToken, rollcmd.DefaultMaxBlobSize) + daClient, err := celestiada.NewClient(cmd.Context(), logger, nodeConfig.DA.Address, nodeConfig.DA.AuthToken, rollcmd.DefaultMaxBlobSize) if err != nil { return err } - defer daAdapter.Close() + defer daClient.Close() // Create datastore datastore, err := store.NewDefaultKVStore(nodeConfig.RootDir, nodeConfig.DBPath, "grpc-single") @@ -85,7 +85,7 @@ The execution client must implement the Evolve execution gRPC interface.`, cmd.Context(), logger, datastore, - daAdapter, + daClient, []byte(genesis.ChainID), nodeConfig.Node.BlockTime.Duration, singleMetrics, @@ -108,7 +108,7 @@ The execution client must implement the Evolve execution gRPC interface.`, } // Start the node - return rollcmd.StartNode(logger, cmd, executor, sequencer, daAdapter, p2pClient, datastore, nodeConfig, genesis, node.NodeOptions{}) + return rollcmd.StartNode(logger, cmd, executor, sequencer, daClient, p2pClient, datastore, nodeConfig, genesis, node.NodeOptions{}) }, } diff --git a/apps/testapp/cmd/run.go b/apps/testapp/cmd/run.go index f51cab76b1..572a42305e 100644 --- a/apps/testapp/cmd/run.go +++ b/apps/testapp/cmd/run.go @@ -51,11 +51,11 @@ var RunCmd = &cobra.Command{ logger.Info().Str("headerNamespace", headerNamespace.HexString()).Str("dataNamespace", dataNamespace.HexString()).Msg("namespaces") - daAdapter, err := celestiada.NewAdapter(ctx, logger, nodeConfig.DA.Address, nodeConfig.DA.AuthToken, rollcmd.DefaultMaxBlobSize) + daClient, err := celestiada.NewClient(ctx, logger, nodeConfig.DA.Address, nodeConfig.DA.AuthToken, rollcmd.DefaultMaxBlobSize) if err != nil { return err } - defer daAdapter.Close() + defer daClient.Close() nodeKey, err := key.LoadNodeKey(filepath.Dir(nodeConfig.ConfigPath())) if err != nil { @@ -97,7 +97,7 @@ var RunCmd = &cobra.Command{ ctx, logger, datastore, - daAdapter, + daClient, []byte(genesis.ChainID), nodeConfig.Node.BlockTime.Duration, singleMetrics, @@ -112,6 +112,6 @@ var RunCmd = &cobra.Command{ return err } - return rollcmd.StartNode(logger, cmd, executor, sequencer, daAdapter, p2pClient, datastore, nodeConfig, genesis, node.NodeOptions{}) + return rollcmd.StartNode(logger, cmd, executor, sequencer, daClient, p2pClient, datastore, nodeConfig, genesis, node.NodeOptions{}) }, } diff --git a/da/celestia/adapter.go b/da/celestia/adapter.go deleted file mode 100644 index b1bd8ac6ae..0000000000 --- a/da/celestia/adapter.go +++ /dev/null @@ -1,255 +0,0 @@ -package celestia - -import ( - "context" - "encoding/binary" - "encoding/json" - "fmt" - "strings" - "time" - - "github.com/evstack/ev-node/da" - "github.com/rs/zerolog" -) - -// Adapter wraps the Celestia client to implement the da.DA interface. -// This is a temporary bridge to allow ev-node to use the native Celestia blob API -// while maintaining compatibility with the existing DA abstraction. -type Adapter struct { - client *Client - logger zerolog.Logger - maxBlobSize uint64 -} - -// NewAdapter creates a new adapter that implements da.DA interface. -func NewAdapter( - ctx context.Context, - logger zerolog.Logger, - addr string, - token string, - maxBlobSize uint64, -) (*Adapter, error) { - client, err := NewClient(ctx, logger, addr, token, maxBlobSize) - if err != nil { - return nil, err - } - - return &Adapter{ - client: client, - logger: logger, - maxBlobSize: maxBlobSize, - }, nil -} - -// Close closes the underlying client connection. -func (a *Adapter) Close() { - a.client.Close() -} - -// Submit submits blobs to Celestia and returns IDs. -func (a *Adapter) Submit(ctx context.Context, blobs []da.Blob, gasPrice float64, namespace []byte) ([]da.ID, error) { - return a.SubmitWithOptions(ctx, blobs, gasPrice, namespace, nil) -} - -// SubmitWithOptions submits blobs to Celestia with additional options. -func (a *Adapter) SubmitWithOptions(ctx context.Context, blobs []da.Blob, gasPrice float64, namespace []byte, options []byte) ([]da.ID, error) { - if len(blobs) == 0 { - return []da.ID{}, nil - } - - // Validate namespace - if err := ValidateNamespace(namespace); err != nil { - return nil, fmt.Errorf("invalid namespace: %w", err) - } - - // Convert blobs to Celestia format - celestiaBlobs := make([]*Blob, len(blobs)) - for i, blob := range blobs { - celestiaBlobs[i] = &Blob{ - Namespace: namespace, - Data: blob, - } - } - - // Parse submit options if provided - var opts *SubmitOptions - if len(options) > 0 { - opts = &SubmitOptions{} - if err := json.Unmarshal(options, opts); err != nil { - return nil, fmt.Errorf("failed to unmarshal submit options: %w", err) - } - opts.Fee = gasPrice - } else { - opts = &SubmitOptions{Fee: gasPrice} - } - - height, err := a.client.Submit(ctx, celestiaBlobs, opts) - if err != nil { - if strings.Contains(err.Error(), "timeout") { - return nil, da.ErrTxTimedOut - } - if strings.Contains(err.Error(), "too large") || strings.Contains(err.Error(), "exceeds") { - return nil, da.ErrBlobSizeOverLimit - } - return nil, err - } - - // Create IDs from height and commitments - ids := make([]da.ID, len(celestiaBlobs)) - for i, blob := range celestiaBlobs { - ids[i] = makeID(height, blob.Commitment) - } - - return ids, nil -} - -// Get retrieves blobs by their IDs. -func (a *Adapter) Get(ctx context.Context, ids []da.ID, namespace []byte) ([]da.Blob, error) { - if len(ids) == 0 { - return []da.Blob{}, nil - } - - // Group IDs by height for efficient retrieval - type blobKey struct { - height uint64 - commitment string - } - heightGroups := make(map[uint64][]Commitment) - idToIndex := make(map[blobKey]int) - - for i, id := range ids { - height, commitment, err := splitID(id) - if err != nil { - return nil, fmt.Errorf("invalid ID at index %d: %w", i, err) - } - heightGroups[height] = append(heightGroups[height], commitment) - idToIndex[blobKey{height, string(commitment)}] = i - } - - // Retrieve blobs for each height - result := make([]da.Blob, len(ids)) - for height := range heightGroups { - blobs, err := a.client.GetAll(ctx, height, []Namespace{namespace}) - if err != nil { - if strings.Contains(err.Error(), "not found") { - return nil, da.ErrBlobNotFound - } - return nil, fmt.Errorf("failed to get blobs at height %d: %w", height, err) - } - - // Match blobs to their original positions - for _, blob := range blobs { - key := blobKey{height, string(blob.Commitment)} - if idx, ok := idToIndex[key]; ok { - result[idx] = blob.Data - } - } - } - - return result, nil -} - -// GetIDs returns all blob IDs at the given height. -func (a *Adapter) GetIDs(ctx context.Context, height uint64, namespace []byte) (*da.GetIDsResult, error) { - blobs, err := a.client.GetAll(ctx, height, []Namespace{namespace}) - if err != nil { - if strings.Contains(err.Error(), "not found") { - return nil, da.ErrBlobNotFound - } - if strings.Contains(err.Error(), "height") && strings.Contains(err.Error(), "future") { - return nil, da.ErrHeightFromFuture - } - return nil, err - } - - if len(blobs) == 0 { - return nil, da.ErrBlobNotFound - } - - ids := make([]da.ID, len(blobs)) - for i, blob := range blobs { - ids[i] = makeID(height, blob.Commitment) - } - - return &da.GetIDsResult{ - IDs: ids, - Timestamp: time.Now(), - }, nil -} - -// GetProofs retrieves inclusion proofs for the given IDs. -func (a *Adapter) GetProofs(ctx context.Context, ids []da.ID, namespace []byte) ([]da.Proof, error) { - if len(ids) == 0 { - return []da.Proof{}, nil - } - - proofs := make([]da.Proof, len(ids)) - for i, id := range ids { - height, commitment, err := splitID(id) - if err != nil { - return nil, fmt.Errorf("invalid ID at index %d: %w", i, err) - } - - proof, err := a.client.GetProof(ctx, height, namespace, commitment) - if err != nil { - return nil, fmt.Errorf("failed to get proof for ID %d: %w", i, err) - } - - proofs[i] = proof.Data - } - - return proofs, nil -} - -// Commit creates commitments for the given blobs. -// Note: Celestia generates commitments automatically during submission, -// so this is a no-op that returns nil commitments. -func (a *Adapter) Commit(ctx context.Context, blobs []da.Blob, namespace []byte) ([]da.Commitment, error) { - commitments := make([]da.Commitment, len(blobs)) - for i := range blobs { - commitments[i] = nil - } - return commitments, nil -} - -// Validate validates commitments against proofs. -func (a *Adapter) Validate(ctx context.Context, ids []da.ID, proofs []da.Proof, namespace []byte) ([]bool, error) { - if len(ids) != len(proofs) { - return nil, fmt.Errorf("mismatched lengths: %d IDs vs %d proofs", len(ids), len(proofs)) - } - - results := make([]bool, len(ids)) - for i, id := range ids { - height, commitment, err := splitID(id) - if err != nil { - return nil, fmt.Errorf("invalid ID at index %d: %w", i, err) - } - - proof := &Proof{Data: proofs[i]} - included, err := a.client.Included(ctx, height, namespace, proof, commitment) - if err != nil { - return nil, fmt.Errorf("failed to validate proof %d: %w", i, err) - } - - results[i] = included - } - - return results, nil -} - -// makeID creates an ID from a height and a commitment. -func makeID(height uint64, commitment []byte) []byte { - id := make([]byte, len(commitment)+8) - binary.LittleEndian.PutUint64(id, height) - copy(id[8:], commitment) - return id -} - -// splitID splits an ID into a height and a commitment. -func splitID(id []byte) (uint64, []byte, error) { - if len(id) <= 8 { - return 0, nil, fmt.Errorf("invalid ID length: %d", len(id)) - } - commitment := id[8:] - return binary.LittleEndian.Uint64(id[:8]), commitment, nil -} diff --git a/da/celestia/adapter_test.go b/da/celestia/adapter_test.go deleted file mode 100644 index 996a98adde..0000000000 --- a/da/celestia/adapter_test.go +++ /dev/null @@ -1,142 +0,0 @@ -package celestia - -import ( - "context" - "testing" - - "github.com/evstack/ev-node/da" - "github.com/rs/zerolog" - "github.com/stretchr/testify/assert" - "github.com/stretchr/testify/require" -) - -func TestNewAdapter(t *testing.T) { - logger := zerolog.Nop() - ctx := context.Background() - - tests := []struct { - name string - addr string - token string - maxBlobSize uint64 - wantErr bool - }{ - { - name: "valid parameters", - addr: "http://localhost:26658", - token: "test-token", - maxBlobSize: 1024 * 1024, - wantErr: false, - }, - { - name: "empty address", - addr: "", - token: "test-token", - maxBlobSize: 1024, - wantErr: true, - }, - { - name: "zero maxBlobSize", - addr: "http://localhost:26658", - token: "test-token", - maxBlobSize: 0, - wantErr: true, - }, - } - - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - adapter, err := NewAdapter(ctx, logger, tt.addr, tt.token, tt.maxBlobSize) - - if tt.wantErr { - require.Error(t, err) - assert.Nil(t, adapter) - } else { - require.NoError(t, err) - require.NotNil(t, adapter) - adapter.Close() - } - }) - } -} - -func TestAdapter_Submit(t *testing.T) { - logger := zerolog.Nop() - ctx := context.Background() - - adapter, err := NewAdapter(ctx, logger, "http://localhost:26658", "token", 1024*1024) - require.NoError(t, err) - defer adapter.Close() - - validNamespace := make([]byte, 29) - blobs := []da.Blob{[]byte("test data")} - - _, err = adapter.Submit(ctx, blobs, 0.002, validNamespace) - require.Error(t, err) - assert.Contains(t, err.Error(), "failed to submit blobs") -} - -func TestAdapter_SubmitWithInvalidNamespace(t *testing.T) { - logger := zerolog.Nop() - ctx := context.Background() - - adapter, err := NewAdapter(ctx, logger, "http://localhost:26658", "token", 1024*1024) - require.NoError(t, err) - defer adapter.Close() - - invalidNamespace := make([]byte, 10) - blobs := []da.Blob{[]byte("test data")} - - _, err = adapter.Submit(ctx, blobs, 0.002, invalidNamespace) - require.Error(t, err) - assert.Contains(t, err.Error(), "invalid namespace") -} - -func TestAdapter_Get(t *testing.T) { - logger := zerolog.Nop() - ctx := context.Background() - - adapter, err := NewAdapter(ctx, logger, "http://localhost:26658", "token", 1024*1024) - require.NoError(t, err) - defer adapter.Close() - - validNamespace := make([]byte, 29) - testID := makeID(100, []byte("test-commitment")) - - _, err = adapter.Get(ctx, []da.ID{testID}, validNamespace) - require.Error(t, err) -} - -func TestAdapter_GetIDs(t *testing.T) { - logger := zerolog.Nop() - ctx := context.Background() - - adapter, err := NewAdapter(ctx, logger, "http://localhost:26658", "token", 1024*1024) - require.NoError(t, err) - defer adapter.Close() - - validNamespace := make([]byte, 29) - - _, err = adapter.GetIDs(ctx, 100, validNamespace) - require.Error(t, err) -} - -func TestMakeIDAndSplitID(t *testing.T) { - height := uint64(12345) - commitment := []byte("test-commitment-data") - - id := makeID(height, commitment) - - retrievedHeight, retrievedCommitment, err := splitID(id) - require.NoError(t, err) - assert.Equal(t, height, retrievedHeight) - assert.Equal(t, commitment, retrievedCommitment) -} - -func TestSplitID_InvalidID(t *testing.T) { - shortID := []byte("short") - - _, _, err := splitID(shortID) - require.Error(t, err) - assert.Contains(t, err.Error(), "invalid ID length") -} diff --git a/da/celestia/client.go b/da/celestia/client.go index 96da15a71d..5f95a09168 100644 --- a/da/celestia/client.go +++ b/da/celestia/client.go @@ -2,14 +2,20 @@ package celestia import ( "context" + "encoding/binary" + "encoding/json" "fmt" "net/http" + "strings" + "time" "github.com/filecoin-project/go-jsonrpc" "github.com/rs/zerolog" + + "github.com/evstack/ev-node/da" ) -// Client connects to celestia-node's blob API via JSON-RPC. +// Client connects to celestia-node's blob API via JSON-RPC and implements the da.DA interface. type Client struct { logger zerolog.Logger maxBlobSize uint64 @@ -24,7 +30,7 @@ type Client struct { } } -// NewClient creates a new client connected to celestia-node. +// NewClient creates a new client connected to celestia-node that implements the da.DA interface. // Token is obtained from: celestia light auth write func NewClient( ctx context.Context, @@ -81,8 +87,8 @@ func (c *Client) Close() { c.logger.Debug().Msg("Celestia client connection closed") } -// Submit submits blobs to Celestia and returns the height at which they were included. -func (c *Client) Submit(ctx context.Context, blobs []*Blob, opts *SubmitOptions) (uint64, error) { +// submit is a private method that submits blobs and returns the height (used internally). +func (c *Client) submit(ctx context.Context, blobs []*Blob, opts *SubmitOptions) (uint64, error) { c.logger.Debug(). Int("num_blobs", len(blobs)). Msg("Submitting blobs to Celestia") @@ -104,8 +110,8 @@ func (c *Client) Submit(ctx context.Context, blobs []*Blob, opts *SubmitOptions) return height, nil } -// Get retrieves a single blob by commitment at a given height and namespace. -func (c *Client) Get(ctx context.Context, height uint64, namespace Namespace, commitment Commitment) (*Blob, error) { +// get retrieves a single blob by commitment at a given height and namespace (used internally). +func (c *Client) get(ctx context.Context, height uint64, namespace Namespace, commitment Commitment) (*Blob, error) { c.logger.Debug(). Uint64("height", height). Msg("Getting blob from Celestia") @@ -127,8 +133,8 @@ func (c *Client) Get(ctx context.Context, height uint64, namespace Namespace, co return blob, nil } -// GetAll retrieves all blobs at a given height for the specified namespaces. -func (c *Client) GetAll(ctx context.Context, height uint64, namespaces []Namespace) ([]*Blob, error) { +// getAll retrieves all blobs at a given height for the specified namespaces (used internally). +func (c *Client) getAll(ctx context.Context, height uint64, namespaces []Namespace) ([]*Blob, error) { c.logger.Debug(). Uint64("height", height). Int("num_namespaces", len(namespaces)). @@ -152,8 +158,8 @@ func (c *Client) GetAll(ctx context.Context, height uint64, namespaces []Namespa return blobs, nil } -// GetProof retrieves the inclusion proof for a blob. -func (c *Client) GetProof(ctx context.Context, height uint64, namespace Namespace, commitment Commitment) (*Proof, error) { +// getProof retrieves the inclusion proof for a blob (used internally). +func (c *Client) getProof(ctx context.Context, height uint64, namespace Namespace, commitment Commitment) (*Proof, error) { c.logger.Debug(). Uint64("height", height). Msg("Getting proof from Celestia") @@ -175,8 +181,8 @@ func (c *Client) GetProof(ctx context.Context, height uint64, namespace Namespac return proof, nil } -// Included checks whether a blob is included in the Celestia block. -func (c *Client) Included(ctx context.Context, height uint64, namespace Namespace, proof *Proof, commitment Commitment) (bool, error) { +// included checks whether a blob is included in the Celestia block (used internally). +func (c *Client) included(ctx context.Context, height uint64, namespace Namespace, proof *Proof, commitment Commitment) (bool, error) { c.logger.Debug(). Uint64("height", height). Msg("Checking blob inclusion in Celestia") @@ -197,3 +203,213 @@ func (c *Client) Included(ctx context.Context, height uint64, namespace Namespac return included, nil } + +// DA interface implementation + +// Submit submits blobs to Celestia and returns IDs. +func (c *Client) Submit(ctx context.Context, blobs []da.Blob, gasPrice float64, namespace []byte) ([]da.ID, error) { + return c.SubmitWithOptions(ctx, blobs, gasPrice, namespace, nil) +} + +// SubmitWithOptions submits blobs to Celestia with additional options. +func (c *Client) SubmitWithOptions(ctx context.Context, blobs []da.Blob, gasPrice float64, namespace []byte, options []byte) ([]da.ID, error) { + if len(blobs) == 0 { + return []da.ID{}, nil + } + + // Validate namespace + if err := ValidateNamespace(namespace); err != nil { + return nil, fmt.Errorf("invalid namespace: %w", err) + } + + // Convert blobs to Celestia format + celestiaBlobs := make([]*Blob, len(blobs)) + for i, blob := range blobs { + celestiaBlobs[i] = &Blob{ + Namespace: namespace, + Data: blob, + } + } + + // Parse submit options if provided + var opts *SubmitOptions + if len(options) > 0 { + opts = &SubmitOptions{} + if err := json.Unmarshal(options, opts); err != nil { + return nil, fmt.Errorf("failed to unmarshal submit options: %w", err) + } + opts.Fee = gasPrice + } else { + opts = &SubmitOptions{Fee: gasPrice} + } + + height, err := c.submit(ctx, celestiaBlobs, opts) + if err != nil { + if strings.Contains(err.Error(), "timeout") { + return nil, da.ErrTxTimedOut + } + if strings.Contains(err.Error(), "too large") || strings.Contains(err.Error(), "exceeds") { + return nil, da.ErrBlobSizeOverLimit + } + return nil, err + } + + // Create IDs from height and commitments + ids := make([]da.ID, len(celestiaBlobs)) + for i, blob := range celestiaBlobs { + ids[i] = makeID(height, blob.Commitment) + } + + return ids, nil +} + +// Get retrieves blobs by their IDs. +func (c *Client) Get(ctx context.Context, ids []da.ID, namespace []byte) ([]da.Blob, error) { + if len(ids) == 0 { + return []da.Blob{}, nil + } + + // Group IDs by height for efficient retrieval + type blobKey struct { + height uint64 + commitment string + } + heightGroups := make(map[uint64][]Commitment) + idToIndex := make(map[blobKey]int) + + for i, id := range ids { + height, commitment, err := splitID(id) + if err != nil { + return nil, fmt.Errorf("invalid ID at index %d: %w", i, err) + } + heightGroups[height] = append(heightGroups[height], commitment) + idToIndex[blobKey{height, string(commitment)}] = i + } + + // Retrieve blobs for each height + result := make([]da.Blob, len(ids)) + for height := range heightGroups { + blobs, err := c.getAll(ctx, height, []Namespace{namespace}) + if err != nil { + if strings.Contains(err.Error(), "not found") { + return nil, da.ErrBlobNotFound + } + return nil, fmt.Errorf("failed to get blobs at height %d: %w", height, err) + } + + // Match blobs to their original positions + for _, blob := range blobs { + key := blobKey{height, string(blob.Commitment)} + if idx, ok := idToIndex[key]; ok { + result[idx] = blob.Data + } + } + } + + return result, nil +} + +// GetIDs returns all blob IDs at the given height. +func (c *Client) GetIDs(ctx context.Context, height uint64, namespace []byte) (*da.GetIDsResult, error) { + blobs, err := c.getAll(ctx, height, []Namespace{namespace}) + if err != nil { + if strings.Contains(err.Error(), "not found") { + return nil, da.ErrBlobNotFound + } + if strings.Contains(err.Error(), "height") && strings.Contains(err.Error(), "future") { + return nil, da.ErrHeightFromFuture + } + return nil, err + } + + if len(blobs) == 0 { + return nil, da.ErrBlobNotFound + } + + ids := make([]da.ID, len(blobs)) + for i, blob := range blobs { + ids[i] = makeID(height, blob.Commitment) + } + + return &da.GetIDsResult{ + IDs: ids, + Timestamp: time.Now(), + }, nil +} + +// GetProofs retrieves inclusion proofs for the given IDs. +func (c *Client) GetProofs(ctx context.Context, ids []da.ID, namespace []byte) ([]da.Proof, error) { + if len(ids) == 0 { + return []da.Proof{}, nil + } + + proofs := make([]da.Proof, len(ids)) + for i, id := range ids { + height, commitment, err := splitID(id) + if err != nil { + return nil, fmt.Errorf("invalid ID at index %d: %w", i, err) + } + + proof, err := c.getProof(ctx, height, namespace, commitment) + if err != nil { + return nil, fmt.Errorf("failed to get proof for ID %d: %w", i, err) + } + + proofs[i] = proof.Data + } + + return proofs, nil +} + +// Commit creates commitments for the given blobs. +// Note: Celestia generates commitments automatically during submission, +// so this is a no-op that returns nil commitments. +func (c *Client) Commit(ctx context.Context, blobs []da.Blob, namespace []byte) ([]da.Commitment, error) { + commitments := make([]da.Commitment, len(blobs)) + for i := range blobs { + commitments[i] = nil + } + return commitments, nil +} + +// Validate validates commitments against proofs. +func (c *Client) Validate(ctx context.Context, ids []da.ID, proofs []da.Proof, namespace []byte) ([]bool, error) { + if len(ids) != len(proofs) { + return nil, fmt.Errorf("mismatched lengths: %d IDs vs %d proofs", len(ids), len(proofs)) + } + + results := make([]bool, len(ids)) + for i, id := range ids { + height, commitment, err := splitID(id) + if err != nil { + return nil, fmt.Errorf("invalid ID at index %d: %w", i, err) + } + + proof := &Proof{Data: proofs[i]} + included, err := c.included(ctx, height, namespace, proof, commitment) + if err != nil { + return nil, fmt.Errorf("failed to validate proof %d: %w", i, err) + } + + results[i] = included + } + + return results, nil +} + +// makeID creates an ID from a height and a commitment. +func makeID(height uint64, commitment []byte) []byte { + id := make([]byte, len(commitment)+8) + binary.LittleEndian.PutUint64(id, height) + copy(id[8:], commitment) + return id +} + +// splitID splits an ID into a height and a commitment. +func splitID(id []byte) (uint64, []byte, error) { + if len(id) < 8 { + return 0, nil, fmt.Errorf("invalid ID length: %d", len(id)) + } + commitment := id[8:] + return binary.LittleEndian.Uint64(id[:8]), commitment, nil +} diff --git a/da/celestia/client_test.go b/da/celestia/client_test.go index 60954ff39a..9d57d5cc42 100644 --- a/da/celestia/client_test.go +++ b/da/celestia/client_test.go @@ -147,7 +147,7 @@ func TestClient_Submit(t *testing.T) { require.NoError(t, err) defer client.Close() - _, err = client.Submit(ctx, tt.blobs, nil) + _, err = client.submit(ctx, tt.blobs, nil) if tt.wantRPC { require.Error(t, err) @@ -168,7 +168,7 @@ func TestClient_Get(t *testing.T) { validNamespace := make([]byte, 29) validCommitment := []byte("commitment") - _, err = client.Get(ctx, 100, validNamespace, validCommitment) + _, err = client.get(ctx, 100, validNamespace, validCommitment) require.Error(t, err) assert.Contains(t, err.Error(), "failed to get blob") } @@ -184,7 +184,7 @@ func TestClient_GetAll(t *testing.T) { validNamespace := make([]byte, 29) namespaces := []Namespace{validNamespace} - _, err = client.GetAll(ctx, 100, namespaces) + _, err = client.getAll(ctx, 100, namespaces) require.Error(t, err) assert.Contains(t, err.Error(), "failed to get blobs") } @@ -200,7 +200,7 @@ func TestClient_GetProof(t *testing.T) { validNamespace := make([]byte, 29) validCommitment := []byte("commitment") - _, err = client.GetProof(ctx, 100, validNamespace, validCommitment) + _, err = client.getProof(ctx, 100, validNamespace, validCommitment) require.Error(t, err) assert.Contains(t, err.Error(), "failed to get proof") } @@ -217,7 +217,7 @@ func TestClient_Included(t *testing.T) { validCommitment := []byte("commitment") proof := &Proof{Data: []byte("proof")} - _, err = client.Included(ctx, 100, validNamespace, proof, validCommitment) + _, err = client.included(ctx, 100, validNamespace, proof, validCommitment) require.Error(t, err) assert.Contains(t, err.Error(), "failed to check inclusion") } From 190da1a21dfa0eb6d5787c5d4e3d5a7926f7a832 Mon Sep 17 00:00:00 2001 From: Randy Grok Date: Mon, 24 Nov 2025 13:38:45 +0100 Subject: [PATCH 10/35] refactor: remove Adapter, Client implements DA interface directly - Move all DA interface methods (Submit, Get, GetIDs, GetProofs, Commit, Validate) directly into Client - Rename internal Celestia blob API methods to lowercase (submit, get, getAll, getProof, included) - Update all applications to use celestiada.NewClient instead of celestiada.NewAdapter - Remove adapter.go and adapter_test.go (397 lines) - Update client tests to call lowercase internal methods --- apps/evm/single/cmd/run.go | 8 +- apps/grpc/single/cmd/run.go | 8 +- apps/testapp/cmd/run.go | 8 +- da/celestia/adapter.go | 255 ------------------------------------ da/celestia/adapter_test.go | 142 -------------------- da/celestia/client.go | 232 ++++++++++++++++++++++++++++++-- da/celestia/client_test.go | 10 +- da/da.go | 2 +- go.mod | 3 + 9 files changed, 241 insertions(+), 427 deletions(-) delete mode 100644 da/celestia/adapter.go delete mode 100644 da/celestia/adapter_test.go diff --git a/apps/evm/single/cmd/run.go b/apps/evm/single/cmd/run.go index 0c2f2be85d..1752661243 100644 --- a/apps/evm/single/cmd/run.go +++ b/apps/evm/single/cmd/run.go @@ -54,11 +54,11 @@ var RunCmd = &cobra.Command{ logger.Info().Str("headerNamespace", headerNamespace.HexString()).Str("dataNamespace", dataNamespace.HexString()).Msg("namespaces") ctx := context.Background() - daAdapter, err := celestiada.NewAdapter(ctx, logger, nodeConfig.DA.Address, nodeConfig.DA.AuthToken, rollcmd.DefaultMaxBlobSize) + daClient, err := celestiada.NewClient(ctx, logger, nodeConfig.DA.Address, nodeConfig.DA.AuthToken, rollcmd.DefaultMaxBlobSize) if err != nil { return err } - defer daAdapter.Close() + defer daClient.Close() datastore, err := store.NewDefaultKVStore(nodeConfig.RootDir, nodeConfig.DBPath, "evm-single") if err != nil { @@ -84,7 +84,7 @@ var RunCmd = &cobra.Command{ ctx, logger, datastore, - daAdapter, + daClient, []byte(genesis.ChainID), nodeConfig.Node.BlockTime.Duration, singleMetrics, @@ -104,7 +104,7 @@ var RunCmd = &cobra.Command{ return err } - return rollcmd.StartNode(logger, cmd, executor, sequencer, daAdapter, p2pClient, datastore, nodeConfig, genesis, node.NodeOptions{}) + return rollcmd.StartNode(logger, cmd, executor, sequencer, daClient, p2pClient, datastore, nodeConfig, genesis, node.NodeOptions{}) }, } diff --git a/apps/grpc/single/cmd/run.go b/apps/grpc/single/cmd/run.go index 7953382f36..a262c6449b 100644 --- a/apps/grpc/single/cmd/run.go +++ b/apps/grpc/single/cmd/run.go @@ -52,11 +52,11 @@ The execution client must implement the Evolve execution gRPC interface.`, logger.Info().Str("headerNamespace", headerNamespace.HexString()).Str("dataNamespace", dataNamespace.HexString()).Msg("namespaces") // Create DA client - daAdapter, err := celestiada.NewAdapter(cmd.Context(), logger, nodeConfig.DA.Address, nodeConfig.DA.AuthToken, rollcmd.DefaultMaxBlobSize) + daClient, err := celestiada.NewClient(cmd.Context(), logger, nodeConfig.DA.Address, nodeConfig.DA.AuthToken, rollcmd.DefaultMaxBlobSize) if err != nil { return err } - defer daAdapter.Close() + defer daClient.Close() // Create datastore datastore, err := store.NewDefaultKVStore(nodeConfig.RootDir, nodeConfig.DBPath, "grpc-single") @@ -85,7 +85,7 @@ The execution client must implement the Evolve execution gRPC interface.`, cmd.Context(), logger, datastore, - daAdapter, + daClient, []byte(genesis.ChainID), nodeConfig.Node.BlockTime.Duration, singleMetrics, @@ -108,7 +108,7 @@ The execution client must implement the Evolve execution gRPC interface.`, } // Start the node - return rollcmd.StartNode(logger, cmd, executor, sequencer, daAdapter, p2pClient, datastore, nodeConfig, genesis, node.NodeOptions{}) + return rollcmd.StartNode(logger, cmd, executor, sequencer, daClient, p2pClient, datastore, nodeConfig, genesis, node.NodeOptions{}) }, } diff --git a/apps/testapp/cmd/run.go b/apps/testapp/cmd/run.go index f51cab76b1..572a42305e 100644 --- a/apps/testapp/cmd/run.go +++ b/apps/testapp/cmd/run.go @@ -51,11 +51,11 @@ var RunCmd = &cobra.Command{ logger.Info().Str("headerNamespace", headerNamespace.HexString()).Str("dataNamespace", dataNamespace.HexString()).Msg("namespaces") - daAdapter, err := celestiada.NewAdapter(ctx, logger, nodeConfig.DA.Address, nodeConfig.DA.AuthToken, rollcmd.DefaultMaxBlobSize) + daClient, err := celestiada.NewClient(ctx, logger, nodeConfig.DA.Address, nodeConfig.DA.AuthToken, rollcmd.DefaultMaxBlobSize) if err != nil { return err } - defer daAdapter.Close() + defer daClient.Close() nodeKey, err := key.LoadNodeKey(filepath.Dir(nodeConfig.ConfigPath())) if err != nil { @@ -97,7 +97,7 @@ var RunCmd = &cobra.Command{ ctx, logger, datastore, - daAdapter, + daClient, []byte(genesis.ChainID), nodeConfig.Node.BlockTime.Duration, singleMetrics, @@ -112,6 +112,6 @@ var RunCmd = &cobra.Command{ return err } - return rollcmd.StartNode(logger, cmd, executor, sequencer, daAdapter, p2pClient, datastore, nodeConfig, genesis, node.NodeOptions{}) + return rollcmd.StartNode(logger, cmd, executor, sequencer, daClient, p2pClient, datastore, nodeConfig, genesis, node.NodeOptions{}) }, } diff --git a/da/celestia/adapter.go b/da/celestia/adapter.go deleted file mode 100644 index b1bd8ac6ae..0000000000 --- a/da/celestia/adapter.go +++ /dev/null @@ -1,255 +0,0 @@ -package celestia - -import ( - "context" - "encoding/binary" - "encoding/json" - "fmt" - "strings" - "time" - - "github.com/evstack/ev-node/da" - "github.com/rs/zerolog" -) - -// Adapter wraps the Celestia client to implement the da.DA interface. -// This is a temporary bridge to allow ev-node to use the native Celestia blob API -// while maintaining compatibility with the existing DA abstraction. -type Adapter struct { - client *Client - logger zerolog.Logger - maxBlobSize uint64 -} - -// NewAdapter creates a new adapter that implements da.DA interface. -func NewAdapter( - ctx context.Context, - logger zerolog.Logger, - addr string, - token string, - maxBlobSize uint64, -) (*Adapter, error) { - client, err := NewClient(ctx, logger, addr, token, maxBlobSize) - if err != nil { - return nil, err - } - - return &Adapter{ - client: client, - logger: logger, - maxBlobSize: maxBlobSize, - }, nil -} - -// Close closes the underlying client connection. -func (a *Adapter) Close() { - a.client.Close() -} - -// Submit submits blobs to Celestia and returns IDs. -func (a *Adapter) Submit(ctx context.Context, blobs []da.Blob, gasPrice float64, namespace []byte) ([]da.ID, error) { - return a.SubmitWithOptions(ctx, blobs, gasPrice, namespace, nil) -} - -// SubmitWithOptions submits blobs to Celestia with additional options. -func (a *Adapter) SubmitWithOptions(ctx context.Context, blobs []da.Blob, gasPrice float64, namespace []byte, options []byte) ([]da.ID, error) { - if len(blobs) == 0 { - return []da.ID{}, nil - } - - // Validate namespace - if err := ValidateNamespace(namespace); err != nil { - return nil, fmt.Errorf("invalid namespace: %w", err) - } - - // Convert blobs to Celestia format - celestiaBlobs := make([]*Blob, len(blobs)) - for i, blob := range blobs { - celestiaBlobs[i] = &Blob{ - Namespace: namespace, - Data: blob, - } - } - - // Parse submit options if provided - var opts *SubmitOptions - if len(options) > 0 { - opts = &SubmitOptions{} - if err := json.Unmarshal(options, opts); err != nil { - return nil, fmt.Errorf("failed to unmarshal submit options: %w", err) - } - opts.Fee = gasPrice - } else { - opts = &SubmitOptions{Fee: gasPrice} - } - - height, err := a.client.Submit(ctx, celestiaBlobs, opts) - if err != nil { - if strings.Contains(err.Error(), "timeout") { - return nil, da.ErrTxTimedOut - } - if strings.Contains(err.Error(), "too large") || strings.Contains(err.Error(), "exceeds") { - return nil, da.ErrBlobSizeOverLimit - } - return nil, err - } - - // Create IDs from height and commitments - ids := make([]da.ID, len(celestiaBlobs)) - for i, blob := range celestiaBlobs { - ids[i] = makeID(height, blob.Commitment) - } - - return ids, nil -} - -// Get retrieves blobs by their IDs. -func (a *Adapter) Get(ctx context.Context, ids []da.ID, namespace []byte) ([]da.Blob, error) { - if len(ids) == 0 { - return []da.Blob{}, nil - } - - // Group IDs by height for efficient retrieval - type blobKey struct { - height uint64 - commitment string - } - heightGroups := make(map[uint64][]Commitment) - idToIndex := make(map[blobKey]int) - - for i, id := range ids { - height, commitment, err := splitID(id) - if err != nil { - return nil, fmt.Errorf("invalid ID at index %d: %w", i, err) - } - heightGroups[height] = append(heightGroups[height], commitment) - idToIndex[blobKey{height, string(commitment)}] = i - } - - // Retrieve blobs for each height - result := make([]da.Blob, len(ids)) - for height := range heightGroups { - blobs, err := a.client.GetAll(ctx, height, []Namespace{namespace}) - if err != nil { - if strings.Contains(err.Error(), "not found") { - return nil, da.ErrBlobNotFound - } - return nil, fmt.Errorf("failed to get blobs at height %d: %w", height, err) - } - - // Match blobs to their original positions - for _, blob := range blobs { - key := blobKey{height, string(blob.Commitment)} - if idx, ok := idToIndex[key]; ok { - result[idx] = blob.Data - } - } - } - - return result, nil -} - -// GetIDs returns all blob IDs at the given height. -func (a *Adapter) GetIDs(ctx context.Context, height uint64, namespace []byte) (*da.GetIDsResult, error) { - blobs, err := a.client.GetAll(ctx, height, []Namespace{namespace}) - if err != nil { - if strings.Contains(err.Error(), "not found") { - return nil, da.ErrBlobNotFound - } - if strings.Contains(err.Error(), "height") && strings.Contains(err.Error(), "future") { - return nil, da.ErrHeightFromFuture - } - return nil, err - } - - if len(blobs) == 0 { - return nil, da.ErrBlobNotFound - } - - ids := make([]da.ID, len(blobs)) - for i, blob := range blobs { - ids[i] = makeID(height, blob.Commitment) - } - - return &da.GetIDsResult{ - IDs: ids, - Timestamp: time.Now(), - }, nil -} - -// GetProofs retrieves inclusion proofs for the given IDs. -func (a *Adapter) GetProofs(ctx context.Context, ids []da.ID, namespace []byte) ([]da.Proof, error) { - if len(ids) == 0 { - return []da.Proof{}, nil - } - - proofs := make([]da.Proof, len(ids)) - for i, id := range ids { - height, commitment, err := splitID(id) - if err != nil { - return nil, fmt.Errorf("invalid ID at index %d: %w", i, err) - } - - proof, err := a.client.GetProof(ctx, height, namespace, commitment) - if err != nil { - return nil, fmt.Errorf("failed to get proof for ID %d: %w", i, err) - } - - proofs[i] = proof.Data - } - - return proofs, nil -} - -// Commit creates commitments for the given blobs. -// Note: Celestia generates commitments automatically during submission, -// so this is a no-op that returns nil commitments. -func (a *Adapter) Commit(ctx context.Context, blobs []da.Blob, namespace []byte) ([]da.Commitment, error) { - commitments := make([]da.Commitment, len(blobs)) - for i := range blobs { - commitments[i] = nil - } - return commitments, nil -} - -// Validate validates commitments against proofs. -func (a *Adapter) Validate(ctx context.Context, ids []da.ID, proofs []da.Proof, namespace []byte) ([]bool, error) { - if len(ids) != len(proofs) { - return nil, fmt.Errorf("mismatched lengths: %d IDs vs %d proofs", len(ids), len(proofs)) - } - - results := make([]bool, len(ids)) - for i, id := range ids { - height, commitment, err := splitID(id) - if err != nil { - return nil, fmt.Errorf("invalid ID at index %d: %w", i, err) - } - - proof := &Proof{Data: proofs[i]} - included, err := a.client.Included(ctx, height, namespace, proof, commitment) - if err != nil { - return nil, fmt.Errorf("failed to validate proof %d: %w", i, err) - } - - results[i] = included - } - - return results, nil -} - -// makeID creates an ID from a height and a commitment. -func makeID(height uint64, commitment []byte) []byte { - id := make([]byte, len(commitment)+8) - binary.LittleEndian.PutUint64(id, height) - copy(id[8:], commitment) - return id -} - -// splitID splits an ID into a height and a commitment. -func splitID(id []byte) (uint64, []byte, error) { - if len(id) <= 8 { - return 0, nil, fmt.Errorf("invalid ID length: %d", len(id)) - } - commitment := id[8:] - return binary.LittleEndian.Uint64(id[:8]), commitment, nil -} diff --git a/da/celestia/adapter_test.go b/da/celestia/adapter_test.go deleted file mode 100644 index 996a98adde..0000000000 --- a/da/celestia/adapter_test.go +++ /dev/null @@ -1,142 +0,0 @@ -package celestia - -import ( - "context" - "testing" - - "github.com/evstack/ev-node/da" - "github.com/rs/zerolog" - "github.com/stretchr/testify/assert" - "github.com/stretchr/testify/require" -) - -func TestNewAdapter(t *testing.T) { - logger := zerolog.Nop() - ctx := context.Background() - - tests := []struct { - name string - addr string - token string - maxBlobSize uint64 - wantErr bool - }{ - { - name: "valid parameters", - addr: "http://localhost:26658", - token: "test-token", - maxBlobSize: 1024 * 1024, - wantErr: false, - }, - { - name: "empty address", - addr: "", - token: "test-token", - maxBlobSize: 1024, - wantErr: true, - }, - { - name: "zero maxBlobSize", - addr: "http://localhost:26658", - token: "test-token", - maxBlobSize: 0, - wantErr: true, - }, - } - - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - adapter, err := NewAdapter(ctx, logger, tt.addr, tt.token, tt.maxBlobSize) - - if tt.wantErr { - require.Error(t, err) - assert.Nil(t, adapter) - } else { - require.NoError(t, err) - require.NotNil(t, adapter) - adapter.Close() - } - }) - } -} - -func TestAdapter_Submit(t *testing.T) { - logger := zerolog.Nop() - ctx := context.Background() - - adapter, err := NewAdapter(ctx, logger, "http://localhost:26658", "token", 1024*1024) - require.NoError(t, err) - defer adapter.Close() - - validNamespace := make([]byte, 29) - blobs := []da.Blob{[]byte("test data")} - - _, err = adapter.Submit(ctx, blobs, 0.002, validNamespace) - require.Error(t, err) - assert.Contains(t, err.Error(), "failed to submit blobs") -} - -func TestAdapter_SubmitWithInvalidNamespace(t *testing.T) { - logger := zerolog.Nop() - ctx := context.Background() - - adapter, err := NewAdapter(ctx, logger, "http://localhost:26658", "token", 1024*1024) - require.NoError(t, err) - defer adapter.Close() - - invalidNamespace := make([]byte, 10) - blobs := []da.Blob{[]byte("test data")} - - _, err = adapter.Submit(ctx, blobs, 0.002, invalidNamespace) - require.Error(t, err) - assert.Contains(t, err.Error(), "invalid namespace") -} - -func TestAdapter_Get(t *testing.T) { - logger := zerolog.Nop() - ctx := context.Background() - - adapter, err := NewAdapter(ctx, logger, "http://localhost:26658", "token", 1024*1024) - require.NoError(t, err) - defer adapter.Close() - - validNamespace := make([]byte, 29) - testID := makeID(100, []byte("test-commitment")) - - _, err = adapter.Get(ctx, []da.ID{testID}, validNamespace) - require.Error(t, err) -} - -func TestAdapter_GetIDs(t *testing.T) { - logger := zerolog.Nop() - ctx := context.Background() - - adapter, err := NewAdapter(ctx, logger, "http://localhost:26658", "token", 1024*1024) - require.NoError(t, err) - defer adapter.Close() - - validNamespace := make([]byte, 29) - - _, err = adapter.GetIDs(ctx, 100, validNamespace) - require.Error(t, err) -} - -func TestMakeIDAndSplitID(t *testing.T) { - height := uint64(12345) - commitment := []byte("test-commitment-data") - - id := makeID(height, commitment) - - retrievedHeight, retrievedCommitment, err := splitID(id) - require.NoError(t, err) - assert.Equal(t, height, retrievedHeight) - assert.Equal(t, commitment, retrievedCommitment) -} - -func TestSplitID_InvalidID(t *testing.T) { - shortID := []byte("short") - - _, _, err := splitID(shortID) - require.Error(t, err) - assert.Contains(t, err.Error(), "invalid ID length") -} diff --git a/da/celestia/client.go b/da/celestia/client.go index 96da15a71d..7328637ae8 100644 --- a/da/celestia/client.go +++ b/da/celestia/client.go @@ -2,14 +2,20 @@ package celestia import ( "context" + "encoding/binary" + "encoding/json" "fmt" "net/http" + "strings" + "time" "github.com/filecoin-project/go-jsonrpc" "github.com/rs/zerolog" + + "github.com/evstack/ev-node/da" ) -// Client connects to celestia-node's blob API via JSON-RPC. +// Client connects to celestia-node's blob API via JSON-RPC and implements the da.DA interface. type Client struct { logger zerolog.Logger maxBlobSize uint64 @@ -24,7 +30,7 @@ type Client struct { } } -// NewClient creates a new client connected to celestia-node. +// NewClient creates a new client connected to celestia-node that implements the da.DA interface. // Token is obtained from: celestia light auth write func NewClient( ctx context.Context, @@ -81,8 +87,8 @@ func (c *Client) Close() { c.logger.Debug().Msg("Celestia client connection closed") } -// Submit submits blobs to Celestia and returns the height at which they were included. -func (c *Client) Submit(ctx context.Context, blobs []*Blob, opts *SubmitOptions) (uint64, error) { +// submit is a private method that submits blobs and returns the height (used internally). +func (c *Client) submit(ctx context.Context, blobs []*Blob, opts *SubmitOptions) (uint64, error) { c.logger.Debug(). Int("num_blobs", len(blobs)). Msg("Submitting blobs to Celestia") @@ -104,8 +110,8 @@ func (c *Client) Submit(ctx context.Context, blobs []*Blob, opts *SubmitOptions) return height, nil } -// Get retrieves a single blob by commitment at a given height and namespace. -func (c *Client) Get(ctx context.Context, height uint64, namespace Namespace, commitment Commitment) (*Blob, error) { +// get retrieves a single blob by commitment at a given height and namespace (used internally). +func (c *Client) get(ctx context.Context, height uint64, namespace Namespace, commitment Commitment) (*Blob, error) { c.logger.Debug(). Uint64("height", height). Msg("Getting blob from Celestia") @@ -127,8 +133,8 @@ func (c *Client) Get(ctx context.Context, height uint64, namespace Namespace, co return blob, nil } -// GetAll retrieves all blobs at a given height for the specified namespaces. -func (c *Client) GetAll(ctx context.Context, height uint64, namespaces []Namespace) ([]*Blob, error) { +// getAll retrieves all blobs at a given height for the specified namespaces (used internally). +func (c *Client) getAll(ctx context.Context, height uint64, namespaces []Namespace) ([]*Blob, error) { c.logger.Debug(). Uint64("height", height). Int("num_namespaces", len(namespaces)). @@ -152,8 +158,8 @@ func (c *Client) GetAll(ctx context.Context, height uint64, namespaces []Namespa return blobs, nil } -// GetProof retrieves the inclusion proof for a blob. -func (c *Client) GetProof(ctx context.Context, height uint64, namespace Namespace, commitment Commitment) (*Proof, error) { +// getProof retrieves the inclusion proof for a blob (used internally). +func (c *Client) getProof(ctx context.Context, height uint64, namespace Namespace, commitment Commitment) (*Proof, error) { c.logger.Debug(). Uint64("height", height). Msg("Getting proof from Celestia") @@ -175,8 +181,8 @@ func (c *Client) GetProof(ctx context.Context, height uint64, namespace Namespac return proof, nil } -// Included checks whether a blob is included in the Celestia block. -func (c *Client) Included(ctx context.Context, height uint64, namespace Namespace, proof *Proof, commitment Commitment) (bool, error) { +// included checks whether a blob is included in the Celestia block (used internally). +func (c *Client) included(ctx context.Context, height uint64, namespace Namespace, proof *Proof, commitment Commitment) (bool, error) { c.logger.Debug(). Uint64("height", height). Msg("Checking blob inclusion in Celestia") @@ -197,3 +203,205 @@ func (c *Client) Included(ctx context.Context, height uint64, namespace Namespac return included, nil } + +// DA interface implementation + +// Submit submits blobs to Celestia and returns IDs. +func (c *Client) Submit(ctx context.Context, blobs []da.Blob, gasPrice float64, namespace []byte) ([]da.ID, error) { + return c.SubmitWithOptions(ctx, blobs, gasPrice, namespace, nil) +} + +// SubmitWithOptions submits blobs to Celestia with additional options. +func (c *Client) SubmitWithOptions(ctx context.Context, blobs []da.Blob, gasPrice float64, namespace []byte, options []byte) ([]da.ID, error) { + if len(blobs) == 0 { + return []da.ID{}, nil + } + + // Validate namespace + if err := ValidateNamespace(namespace); err != nil { + return nil, fmt.Errorf("invalid namespace: %w", err) + } + + // Convert blobs to Celestia format + celestiaBlobs := make([]*Blob, len(blobs)) + for i, blob := range blobs { + celestiaBlobs[i] = &Blob{ + Namespace: namespace, + Data: blob, + } + } + + // Parse submit options if provided + var opts *SubmitOptions + if len(options) > 0 { + opts = &SubmitOptions{} + if err := json.Unmarshal(options, opts); err != nil { + return nil, fmt.Errorf("failed to unmarshal submit options: %w", err) + } + opts.Fee = gasPrice + } else { + opts = &SubmitOptions{Fee: gasPrice} + } + + height, err := c.submit(ctx, celestiaBlobs, opts) + if err != nil { + if strings.Contains(err.Error(), "timeout") { + return nil, da.ErrTxTimedOut + } + if strings.Contains(err.Error(), "too large") || strings.Contains(err.Error(), "exceeds") { + return nil, da.ErrBlobSizeOverLimit + } + return nil, err + } + + // Create IDs from height only (commitments not needed for Submit result) + // Commitments will be retrieved later via GetIDs when needed + ids := make([]da.ID, len(celestiaBlobs)) + for i := range celestiaBlobs { + ids[i] = makeID(height, nil) + } + + return ids, nil +} + +// Get retrieves blobs by their IDs. +func (c *Client) Get(ctx context.Context, ids []da.ID, namespace []byte) ([]da.Blob, error) { + if len(ids) == 0 { + return []da.Blob{}, nil + } + + // Group IDs by height for efficient retrieval + type blobKey struct { + height uint64 + commitment string + } + heightGroups := make(map[uint64][]Commitment) + idToIndex := make(map[blobKey]int) + + for i, id := range ids { + height, commitment, err := da.SplitID(id) + if err != nil { + return nil, fmt.Errorf("invalid ID at index %d: %w", i, err) + } + heightGroups[height] = append(heightGroups[height], commitment) + idToIndex[blobKey{height, string(commitment)}] = i + } + + // Retrieve blobs for each height + result := make([]da.Blob, len(ids)) + for height := range heightGroups { + blobs, err := c.getAll(ctx, height, []Namespace{namespace}) + if err != nil { + if strings.Contains(err.Error(), "not found") { + return nil, da.ErrBlobNotFound + } + return nil, fmt.Errorf("failed to get blobs at height %d: %w", height, err) + } + + // Match blobs to their original positions + for _, blob := range blobs { + key := blobKey{height, string(blob.Commitment)} + if idx, ok := idToIndex[key]; ok { + result[idx] = blob.Data + } + } + } + + return result, nil +} + +// GetIDs returns all blob IDs at the given height. +func (c *Client) GetIDs(ctx context.Context, height uint64, namespace []byte) (*da.GetIDsResult, error) { + blobs, err := c.getAll(ctx, height, []Namespace{namespace}) + if err != nil { + if strings.Contains(err.Error(), "not found") { + return nil, da.ErrBlobNotFound + } + if strings.Contains(err.Error(), "height") && strings.Contains(err.Error(), "future") { + return nil, da.ErrHeightFromFuture + } + return nil, err + } + + if len(blobs) == 0 { + return nil, da.ErrBlobNotFound + } + + ids := make([]da.ID, len(blobs)) + for i, blob := range blobs { + ids[i] = makeID(height, blob.Commitment) + } + + return &da.GetIDsResult{ + IDs: ids, + Timestamp: time.Now(), + }, nil +} + +// GetProofs retrieves inclusion proofs for the given IDs. +func (c *Client) GetProofs(ctx context.Context, ids []da.ID, namespace []byte) ([]da.Proof, error) { + if len(ids) == 0 { + return []da.Proof{}, nil + } + + proofs := make([]da.Proof, len(ids)) + for i, id := range ids { + height, commitment, err := da.SplitID(id) + if err != nil { + return nil, fmt.Errorf("invalid ID at index %d: %w", i, err) + } + + proof, err := c.getProof(ctx, height, namespace, commitment) + if err != nil { + return nil, fmt.Errorf("failed to get proof for ID %d: %w", i, err) + } + + proofs[i] = proof.Data + } + + return proofs, nil +} + +// Commit creates commitments for the given blobs. +// Note: Celestia generates commitments automatically during submission, +// so this is a no-op that returns nil commitments. +func (c *Client) Commit(ctx context.Context, blobs []da.Blob, namespace []byte) ([]da.Commitment, error) { + commitments := make([]da.Commitment, len(blobs)) + for i := range blobs { + commitments[i] = nil + } + return commitments, nil +} + +// Validate validates commitments against proofs. +func (c *Client) Validate(ctx context.Context, ids []da.ID, proofs []da.Proof, namespace []byte) ([]bool, error) { + if len(ids) != len(proofs) { + return nil, fmt.Errorf("mismatched lengths: %d IDs vs %d proofs", len(ids), len(proofs)) + } + + results := make([]bool, len(ids)) + for i, id := range ids { + height, commitment, err := da.SplitID(id) + if err != nil { + return nil, fmt.Errorf("invalid ID at index %d: %w", i, err) + } + + proof := &Proof{Data: proofs[i]} + included, err := c.included(ctx, height, namespace, proof, commitment) + if err != nil { + return nil, fmt.Errorf("failed to validate proof %d: %w", i, err) + } + + results[i] = included + } + + return results, nil +} + +// makeID creates an ID from a height and a commitment. +func makeID(height uint64, commitment []byte) []byte { + id := make([]byte, len(commitment)+8) + binary.LittleEndian.PutUint64(id, height) + copy(id[8:], commitment) + return id +} diff --git a/da/celestia/client_test.go b/da/celestia/client_test.go index 60954ff39a..9d57d5cc42 100644 --- a/da/celestia/client_test.go +++ b/da/celestia/client_test.go @@ -147,7 +147,7 @@ func TestClient_Submit(t *testing.T) { require.NoError(t, err) defer client.Close() - _, err = client.Submit(ctx, tt.blobs, nil) + _, err = client.submit(ctx, tt.blobs, nil) if tt.wantRPC { require.Error(t, err) @@ -168,7 +168,7 @@ func TestClient_Get(t *testing.T) { validNamespace := make([]byte, 29) validCommitment := []byte("commitment") - _, err = client.Get(ctx, 100, validNamespace, validCommitment) + _, err = client.get(ctx, 100, validNamespace, validCommitment) require.Error(t, err) assert.Contains(t, err.Error(), "failed to get blob") } @@ -184,7 +184,7 @@ func TestClient_GetAll(t *testing.T) { validNamespace := make([]byte, 29) namespaces := []Namespace{validNamespace} - _, err = client.GetAll(ctx, 100, namespaces) + _, err = client.getAll(ctx, 100, namespaces) require.Error(t, err) assert.Contains(t, err.Error(), "failed to get blobs") } @@ -200,7 +200,7 @@ func TestClient_GetProof(t *testing.T) { validNamespace := make([]byte, 29) validCommitment := []byte("commitment") - _, err = client.GetProof(ctx, 100, validNamespace, validCommitment) + _, err = client.getProof(ctx, 100, validNamespace, validCommitment) require.Error(t, err) assert.Contains(t, err.Error(), "failed to get proof") } @@ -217,7 +217,7 @@ func TestClient_Included(t *testing.T) { validCommitment := []byte("commitment") proof := &Proof{Data: []byte("proof")} - _, err = client.Included(ctx, 100, validNamespace, proof, validCommitment) + _, err = client.included(ctx, 100, validNamespace, proof, validCommitment) require.Error(t, err) assert.Contains(t, err.Error(), "failed to check inclusion") } diff --git a/da/da.go b/da/da.go index b1d1b6edb3..f414e0404a 100644 --- a/da/da.go +++ b/da/da.go @@ -104,7 +104,7 @@ func makeID(height uint64, commitment []byte) []byte { // SplitID splits an ID into a height and a commitment. func SplitID(id []byte) (uint64, []byte, error) { - if len(id) <= 8 { + if len(id) < 8 { return 0, nil, fmt.Errorf("invalid ID length: %d", len(id)) } commitment := id[8:] diff --git a/go.mod b/go.mod index 5529903687..ed26a3d7d7 100644 --- a/go.mod +++ b/go.mod @@ -11,6 +11,7 @@ require ( github.com/celestiaorg/go-square/v3 v3.0.2 github.com/celestiaorg/utils v0.1.0 github.com/evstack/ev-node/core v1.0.0-beta.4 + github.com/evstack/ev-node/da v0.0.0-00010101000000-000000000000 github.com/go-kit/kit v0.13.0 github.com/goccy/go-yaml v1.18.0 github.com/ipfs/go-datastore v0.9.0 @@ -163,3 +164,5 @@ require ( ) replace github.com/evstack/ev-node/core => ./core + +replace github.com/evstack/ev-node/da => ./da From a46e82b7da784349ec35f0f29d72ebad9ca80895 Mon Sep 17 00:00:00 2001 From: Randy Grok Date: Tue, 25 Nov 2025 11:44:04 +0100 Subject: [PATCH 11/35] feat: add DA debugging tool for blockchain data inspection - bring back command-line tool `da-debug` for inspecting data availability layers in blockchain. - Added functionality to query and search for blobs at specific DA heights and namespaces. - Introduced detailed logging and error handling for better user experience. - Created unit tests for key functions including namespace parsing, hex decoding, and blob data handling. --- tools/da-debug/README.md | 107 +++++++ tools/da-debug/go.mod | 59 ++++ tools/da-debug/go.sum | 429 +++++++++++++++++++++++++++ tools/da-debug/main.go | 563 ++++++++++++++++++++++++++++++++++++ tools/da-debug/main_test.go | 216 ++++++++++++++ 5 files changed, 1374 insertions(+) create mode 100644 tools/da-debug/README.md create mode 100644 tools/da-debug/go.mod create mode 100644 tools/da-debug/go.sum create mode 100644 tools/da-debug/main.go create mode 100644 tools/da-debug/main_test.go diff --git a/tools/da-debug/README.md b/tools/da-debug/README.md new file mode 100644 index 0000000000..ef6eeae18c --- /dev/null +++ b/tools/da-debug/README.md @@ -0,0 +1,107 @@ +# DA Debug Tool + +A debugging tool for querying and inspecting Data Availability (DA) layer data in ev-node. Connects directly to Celestia's blob API. + +## Overview + +The `da-debug` tool provides a command-line interface to interact with Celestia for debugging purposes. It offers two main commands: `query` for inspecting specific DA heights and `search` for finding blobs containing specific blockchain heights. + +## Installation + +```bash +go install github.com/evstack/ev-node/tools/da-debug@main +``` + +Or build locally: + +```bash +make build-tool-da-debug +``` + +## Commands + +### Query Command + +Query and decode blobs at a specific DA height and namespace. + +```bash +da-debug query [flags] +``` + +**Flags:** + +- `--filter-height uint`: Filter blobs by specific blockchain height (0 = no filter) + +**Examples:** + +```bash +# Basic query +da-debug query 100 "my-rollup" + +# Query with height filter (only show blobs containing height 50) +da-debug query 100 "my-rollup" --filter-height 50 + +# Query with hex namespace +da-debug query 500 "0x000000000000000000000000000000000000000000000000000000746573743031" +``` + +### Search Command + +Search through multiple DA heights to find blobs containing a specific blockchain height. + +```bash +da-debug search --target-height [flags] +``` + +**Flags:** + +- `--target-height uint`: Target blockchain height to search for (required) +- `--range uint`: Number of DA heights to search (default: 10) + +**Examples:** + +```bash +# Search for blockchain height 1000 starting from DA height 500 +da-debug search 500 "my-rollup" --target-height 1000 + +# Search with custom range of 20 DA heights +da-debug search 500 "my-rollup" --target-height 1000 --range 20 + +# Search with hex namespace +da-debug search 100 "0x000000000000000000000000000000000000000000000000000000746573743031" --target-height 50 --range 5 +``` + +## Global Flags + +All commands support these global flags: + +- `--da-url string`: Celestia node RPC URL (default: "http://localhost:26658") +- `--auth-token string`: Authentication token for Celestia node +- `--timeout duration`: Request timeout (default: 30s) +- `--verbose`: Enable verbose logging +- `--max-blob-size uint`: Maximum blob size in bytes (default: 1970176) + +## Namespace Format + +Namespaces can be provided in two formats: + +1. **Hex String**: A 29-byte hex string (with or without `0x` prefix) + - Example: `0x000000000000000000000000000000000000000000000000000000746573743031` + +2. **String Identifier**: Any string that gets automatically converted to a valid namespace + - Example: `"my-app"` or `"test-namespace"` + - The string is hashed and converted to a valid version 0 namespace + +## Getting an Auth Token + +To get an authentication token from your Celestia light node: + +```bash +celestia light auth write +``` + +Then use it with: + +```bash +da-debug query 100 "my-rollup" --auth-token "" +``` diff --git a/tools/da-debug/go.mod b/tools/da-debug/go.mod new file mode 100644 index 0000000000..2ac9cda5c7 --- /dev/null +++ b/tools/da-debug/go.mod @@ -0,0 +1,59 @@ +module github.com/evstack/ev-node/tools/da-debug + +go 1.24.6 + +require ( + github.com/evstack/ev-node v0.0.0 + github.com/evstack/ev-node/da v0.0.0 + github.com/rs/zerolog v1.34.0 + github.com/spf13/cobra v1.10.1 + google.golang.org/protobuf v1.36.10 +) + +require ( + github.com/celestiaorg/go-header v0.7.3 // indirect + github.com/decred/dcrd/dcrec/secp256k1/v4 v4.4.0 // indirect + github.com/filecoin-project/go-jsonrpc v0.9.0 // indirect + github.com/gogo/protobuf v1.3.2 // indirect + github.com/golang/groupcache v0.0.0-20241129210726-2c02b8208cf8 // indirect + github.com/google/uuid v1.6.0 // indirect + github.com/gorilla/websocket v1.5.3 // indirect + github.com/hashicorp/golang-lru/v2 v2.0.7 // indirect + github.com/inconshreveable/mousetrap v1.1.0 // indirect + github.com/ipfs/go-cid v0.5.0 // indirect + github.com/ipfs/go-log/v2 v2.8.1 // indirect + github.com/klauspost/cpuid/v2 v2.3.0 // indirect + github.com/libp2p/go-buffer-pool v0.1.0 // indirect + github.com/libp2p/go-libp2p v0.43.0 // indirect + github.com/libp2p/go-libp2p-pubsub v0.15.0 // indirect + github.com/libp2p/go-msgio v0.3.0 // indirect + github.com/mattn/go-colorable v0.1.13 // indirect + github.com/mattn/go-isatty v0.0.20 // indirect + github.com/minio/sha256-simd v1.0.1 // indirect + github.com/mr-tron/base58 v1.2.0 // indirect + github.com/multiformats/go-base32 v0.1.0 // indirect + github.com/multiformats/go-base36 v0.2.0 // indirect + github.com/multiformats/go-multiaddr v0.16.1 // indirect + github.com/multiformats/go-multiaddr-fmt v0.1.0 // indirect + github.com/multiformats/go-multibase v0.2.0 // indirect + github.com/multiformats/go-multicodec v0.9.2 // indirect + github.com/multiformats/go-multihash v0.2.3 // indirect + github.com/multiformats/go-multistream v0.6.1 // indirect + github.com/multiformats/go-varint v0.1.0 // indirect + github.com/spaolacci/murmur3 v1.1.0 // indirect + github.com/spf13/pflag v1.0.10 // indirect + go.opencensus.io v0.24.0 // indirect + go.uber.org/multierr v1.11.0 // indirect + go.uber.org/zap v1.27.0 // indirect + golang.org/x/crypto v0.43.0 // indirect + golang.org/x/exp v0.0.0-20250911091902-df9299821621 // indirect + golang.org/x/sys v0.37.0 // indirect + golang.org/x/xerrors v0.0.0-20240903120638-7835f813f4da // indirect + lukechampine.com/blake3 v1.4.1 // indirect +) + +replace github.com/evstack/ev-node => ../.. + +replace github.com/evstack/ev-node/da => ../../da + +replace github.com/evstack/ev-node/core => ../../core diff --git a/tools/da-debug/go.sum b/tools/da-debug/go.sum new file mode 100644 index 0000000000..4f75d5cf97 --- /dev/null +++ b/tools/da-debug/go.sum @@ -0,0 +1,429 @@ +cloud.google.com/go v0.26.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= +github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU= +github.com/benbjohnson/clock v1.3.5 h1:VvXlSJBzZpA/zum6Sj74hxwYI2DIxRWuNIoXAzHZz5o= +github.com/benbjohnson/clock v1.3.5/go.mod h1:J11/hYXuz8f4ySSvYwY0FKfm+ezbsZBKZxNJlLklBHA= +github.com/beorn7/perks v1.0.1 h1:VlbKKnNfV8bJzeqoa4cOKqO6bYr3WgKZxO8Z16+hsOM= +github.com/beorn7/perks v1.0.1/go.mod h1:G2ZrVWU2WbWT9wwq4/hrbKbnv/1ERSJQ0ibhJ6rlkpw= +github.com/celestiaorg/go-header v0.7.3 h1:3+kIa+YXT789gPGRh3a55qmdYq3yTTBIqTyum26AvN0= +github.com/celestiaorg/go-header v0.7.3/go.mod h1:eX9iTSPthVEAlEDLux40ZT/olXPGhpxHd+mEzJeDhd0= +github.com/celestiaorg/go-square/v3 v3.0.2 h1:eSQOgNII8inK9IhiBZ+6GADQeWbRq4HYY72BOgcduA4= +github.com/celestiaorg/go-square/v3 v3.0.2/go.mod h1:oFReMLsSDMRs82ICFEeFQFCqNvwdsbIM1BzCcb0f7dM= +github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU= +github.com/cespare/xxhash/v2 v2.3.0 h1:UL815xU9SqsFlibzuggzjXhog7bL6oX9BbNZnL2UFvs= +github.com/cespare/xxhash/v2 v2.3.0/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= +github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw= +github.com/cncf/udpa/go v0.0.0-20191209042840-269d4d468f6f/go.mod h1:M8M6+tZqaGXZJjfX53e64911xZQV5JYwmTeXPW+k8Sc= +github.com/coreos/go-systemd/v22 v22.5.0/go.mod h1:Y58oyj3AT4RCenI/lSvhwexgC+NSVTIJ3seZv2GcEnc= +github.com/cpuguy83/go-md2man/v2 v2.0.6/go.mod h1:oOW0eioCTA6cOiMLiUPZOpcVxMig6NIQQ7OS05n1F4g= +github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc h1:U9qPSI2PIWSS1VwoXQT9A3Wy9MM3WgvqSxFWenqJduM= +github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/davidlazar/go-crypto v0.0.0-20200604182044-b73af7476f6c h1:pFUpOrbxDR6AkioZ1ySsx5yxlDQZ8stG2b88gTPxgJU= +github.com/davidlazar/go-crypto v0.0.0-20200604182044-b73af7476f6c/go.mod h1:6UhI8N9EjYm1c2odKpFpAYeR8dsBeM7PtzQhRgxRr9U= +github.com/decred/dcrd/crypto/blake256 v1.1.0 h1:zPMNGQCm0g4QTY27fOCorQW7EryeQ/U0x++OzVrdms8= +github.com/decred/dcrd/crypto/blake256 v1.1.0/go.mod h1:2OfgNZ5wDpcsFmHmCK5gZTPcCXqlm2ArzUIkw9czNJo= +github.com/decred/dcrd/dcrec/secp256k1/v4 v4.4.0 h1:NMZiJj8QnKe1LgsbDayM4UoHwbvwDRwnI3hwNaAHRnc= +github.com/decred/dcrd/dcrec/secp256k1/v4 v4.4.0/go.mod h1:ZXNYxsqcloTdSy/rNShjYzMhyjf0LaoftYK0p+A3h40= +github.com/dgraph-io/badger/v4 v4.5.1 h1:7DCIXrQjo1LKmM96YD+hLVJ2EEsyyoWxJfpdd56HLps= +github.com/dgraph-io/badger/v4 v4.5.1/go.mod h1:qn3Be0j3TfV4kPbVoK0arXCD1/nr1ftth6sbL5jxdoA= +github.com/dgraph-io/ristretto/v2 v2.1.0 h1:59LjpOJLNDULHh8MC4UaegN52lC4JnO2dITsie/Pa8I= +github.com/dgraph-io/ristretto/v2 v2.1.0/go.mod h1:uejeqfYXpUomfse0+lO+13ATz4TypQYLJZzBSAemuB4= +github.com/dustin/go-humanize v1.0.1 h1:GzkhY7T5VNhEkwH0PVJgjz+fX1rhBrR7pRT3mDkpeCY= +github.com/dustin/go-humanize v1.0.1/go.mod h1:Mu1zIs6XwVuF/gI1OepvI0qD18qycQx+mFykh5fBlto= +github.com/envoyproxy/go-control-plane v0.9.0/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= +github.com/envoyproxy/go-control-plane v0.9.1-0.20191026205805-5f8ba28d4473/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= +github.com/envoyproxy/go-control-plane v0.9.4/go.mod h1:6rpuAdCZL397s3pYoYcLgu1mIlRU8Am5FuJP05cCM98= +github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c= +github.com/filecoin-project/go-clock v0.1.0 h1:SFbYIM75M8NnFm1yMHhN9Ahy3W5bEZV9gd6MPfXbKVU= +github.com/filecoin-project/go-clock v0.1.0/go.mod h1:4uB/O4PvOjlx1VCMdZ9MyDZXRm//gkj1ELEbxfI1AZs= +github.com/filecoin-project/go-jsonrpc v0.9.0 h1:G47qEF52w7GholpI21vPSTVBFvsrip6geIoqNiqyZtQ= +github.com/filecoin-project/go-jsonrpc v0.9.0/go.mod h1:OG7kVBVh/AbDFHIwx7Kw0l9ARmKOS6gGOr0LbdBpbLc= +github.com/flynn/noise v1.1.0 h1:KjPQoQCEFdZDiP03phOvGi11+SVVhBG2wOWAorLsstg= +github.com/flynn/noise v1.1.0/go.mod h1:xbMo+0i6+IGbYdJhF31t2eR1BIU0CYc12+BNAKwUTag= +github.com/francoispqt/gojay v1.2.13 h1:d2m3sFjloqoIUQU3TsHBgj6qg/BVGlTBeHDUmyJnXKk= +github.com/francoispqt/gojay v1.2.13/go.mod h1:ehT5mTG4ua4581f1++1WLG0vPdaA9HaiDsoyrBGkyDY= +github.com/fsnotify/fsnotify v1.9.0 h1:2Ml+OJNzbYCTzsxtv8vKSFD9PbJjmhYF14k/jKC7S9k= +github.com/fsnotify/fsnotify v1.9.0/go.mod h1:8jBTzvmWwFyi3Pb8djgCCO5IBqzKJ/Jwo8TRcHyHii0= +github.com/go-kit/kit v0.13.0 h1:OoneCcHKHQ03LfBpoQCUfCluwd2Vt3ohz+kvbJneZAU= +github.com/go-kit/kit v0.13.0/go.mod h1:phqEHMMUbyrCFCTgH48JueqrM3md2HcAZ8N3XE4FKDg= +github.com/go-logr/logr v1.4.3 h1:CjnDlHq8ikf6E492q6eKboGOC0T8CDaOvkHCIg8idEI= +github.com/go-logr/logr v1.4.3/go.mod h1:9T104GzyrTigFIr8wt5mBrctHMim0Nb2HLGrmQ40KvY= +github.com/go-logr/stdr v1.2.2 h1:hSWxHoqTgW2S2qGc0LTAI563KZ5YKYRhT3MFKZMbjag= +github.com/go-logr/stdr v1.2.2/go.mod h1:mMo/vtBO5dYbehREoey6XUKy/eSumjCCveDpRre4VKE= +github.com/go-viper/mapstructure/v2 v2.4.0 h1:EBsztssimR/CONLSZZ04E8qAkxNYq4Qp9LvH92wZUgs= +github.com/go-viper/mapstructure/v2 v2.4.0/go.mod h1:oJDH3BJKyqBA2TXFhDsKDGDTlndYOZ6rGS0BRZIxGhM= +github.com/goccy/go-yaml v1.18.0 h1:8W7wMFS12Pcas7KU+VVkaiCng+kG8QiFeFwzFb+rwuw= +github.com/goccy/go-yaml v1.18.0/go.mod h1:XBurs7gK8ATbW4ZPGKgcbrY1Br56PdM69F7LkFRi1kA= +github.com/godbus/dbus/v5 v5.0.4/go.mod h1:xhWf0FNVPg57R7Z0UbKHbJfkEywrmjJnf7w5xrFpKfA= +github.com/gogo/protobuf v1.3.2 h1:Ov1cvc58UF3b5XjBnZv7+opcTcQFZebYjWzi34vdm4Q= +github.com/gogo/protobuf v1.3.2/go.mod h1:P1XiOD3dCwIKUDQYPy72D8LYyHL2YPYrpS2s69NZV8Q= +github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q= +github.com/golang/groupcache v0.0.0-20200121045136-8c9f03a8e57e/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= +github.com/golang/groupcache v0.0.0-20241129210726-2c02b8208cf8 h1:f+oWsMOmNPc8JmEHVZIycC7hBoQxHH9pNKQORJNozsQ= +github.com/golang/groupcache v0.0.0-20241129210726-2c02b8208cf8/go.mod h1:wcDNUvekVysuuOpQKo3191zZyTpiI6se1N1ULghS0sw= +github.com/golang/mock v1.1.1/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A= +github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= +github.com/golang/protobuf v1.3.2/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= +github.com/golang/protobuf v1.4.0-rc.1/go.mod h1:ceaxUfeHdC40wWswd/P6IGgMaK3YpKi5j83Wpe3EHw8= +github.com/golang/protobuf v1.4.0-rc.1.0.20200221234624-67d41d38c208/go.mod h1:xKAWHe0F5eneWXFV3EuXVDTCmh+JuBKY0li0aMyXATA= +github.com/golang/protobuf v1.4.0-rc.2/go.mod h1:LlEzMj4AhA7rCAGe4KMBDvJI+AwstrUpVNzEA03Pprs= +github.com/golang/protobuf v1.4.0-rc.4.0.20200313231945-b860323f09d0/go.mod h1:WU3c8KckQ9AFe+yFwt9sWVRKCVIyN9cPHBJSNnbL67w= +github.com/golang/protobuf v1.4.0/go.mod h1:jodUvKwWbYaEsadDk5Fwe5c77LiNKVO9IDvqG2KuDX0= +github.com/golang/protobuf v1.4.1/go.mod h1:U8fpvMrcmy5pZrNK1lt4xCsGvpyWQ/VVv6QDs8UjoX8= +github.com/golang/protobuf v1.4.3/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI= +github.com/google/flatbuffers v24.12.23+incompatible h1:ubBKR94NR4pXUCY/MUsRVzd9umNW7ht7EG9hHfS9FX8= +github.com/google/flatbuffers v24.12.23+incompatible/go.mod h1:1AeVuKshWv4vARoZatz6mlQ0JxURH0Kv5+zNeJKJCa8= +github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M= +github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= +github.com/google/go-cmp v0.3.1/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= +github.com/google/go-cmp v0.4.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/go-cmp v0.5.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/go-cmp v0.5.3/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/go-cmp v0.7.0 h1:wk8382ETsv4JYUZwIsn6YpYiWiBsYLSJiTsyBybVuN8= +github.com/google/go-cmp v0.7.0/go.mod h1:pXiqmnSA92OHEEa9HXL2W4E7lf9JzCmGVUdgjX3N/iU= +github.com/google/gopacket v1.1.19 h1:ves8RnFZPGiFnTS0uPQStjwru6uO6h+nlr9j6fL7kF8= +github.com/google/gopacket v1.1.19/go.mod h1:iJ8V8n6KS+z2U1A8pUwu8bW5SyEMkXJB8Yo/Vo+TKTo= +github.com/google/uuid v1.1.2/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= +github.com/google/uuid v1.6.0 h1:NIvaJDMOsjHA8n1jAhLSgzrAzy1Hgr+hNrb57e+94F0= +github.com/google/uuid v1.6.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= +github.com/gorilla/websocket v1.5.3 h1:saDtZ6Pbx/0u+bgYQ3q96pZgCzfhKXGPqt7kZ72aNNg= +github.com/gorilla/websocket v1.5.3/go.mod h1:YR8l580nyteQvAITg2hZ9XVh4b55+EU/adAjf1fMHhE= +github.com/hashicorp/golang-lru v1.0.2 h1:dV3g9Z/unq5DpblPpw+Oqcv4dU/1omnb4Ok8iPY6p1c= +github.com/hashicorp/golang-lru v1.0.2/go.mod h1:iADmTwqILo4mZ8BN3D2Q6+9jd8WM5uGBxy+E8yxSoD4= +github.com/hashicorp/golang-lru/v2 v2.0.7 h1:a+bsQ5rvGLjzHuww6tVxozPZFVghXaHOwFs4luLUK2k= +github.com/hashicorp/golang-lru/v2 v2.0.7/go.mod h1:QeFd9opnmA6QUJc5vARoKUSoFhyfM2/ZepoAG6RGpeM= +github.com/huin/goupnp v1.3.0 h1:UvLUlWDNpoUdYzb2TCn+MuTWtcjXKSza2n6CBdQ0xXc= +github.com/huin/goupnp v1.3.0/go.mod h1:gnGPsThkYa7bFi/KWmEysQRf48l2dvR5bxr2OFckNX8= +github.com/inconshreveable/mousetrap v1.1.0 h1:wN+x4NVGpMsO7ErUn/mUI3vEoE6Jt13X2s0bqwp9tc8= +github.com/inconshreveable/mousetrap v1.1.0/go.mod h1:vpF70FUmC8bwa3OWnCshd2FqLfsEA9PFc4w1p2J65bw= +github.com/ipfs/boxo v0.35.0 h1:3Mku5arSbAZz0dvb4goXRsQuZkFkPrGr5yYdu0YM1pY= +github.com/ipfs/boxo v0.35.0/go.mod h1:uhaF0DGnbgEiXDTmD249jCGbxVkMm6+Ew85q6Uub7lo= +github.com/ipfs/go-cid v0.5.0 h1:goEKKhaGm0ul11IHA7I6p1GmKz8kEYniqFopaB5Otwg= +github.com/ipfs/go-cid v0.5.0/go.mod h1:0L7vmeNXpQpUS9vt+yEARkJ8rOg43DF3iPgn4GIN0mk= +github.com/ipfs/go-datastore v0.9.0 h1:WocriPOayqalEsueHv6SdD4nPVl4rYMfYGLD4bqCZ+w= +github.com/ipfs/go-datastore v0.9.0/go.mod h1:uT77w/XEGrvJWwHgdrMr8bqCN6ZTW9gzmi+3uK+ouHg= +github.com/ipfs/go-ds-badger4 v0.1.8 h1:frNczf5CjCVm62RJ5mW5tD/oLQY/9IKAUpKviRV9QAI= +github.com/ipfs/go-ds-badger4 v0.1.8/go.mod h1:FdqSLA5TMsyqooENB/Hf4xzYE/iH0z/ErLD6ogtfMrA= +github.com/ipfs/go-log/v2 v2.8.1 h1:Y/X36z7ASoLJaYIJAL4xITXgwf7RVeqb1+/25aq/Xk0= +github.com/ipfs/go-log/v2 v2.8.1/go.mod h1:NyhTBcZmh2Y55eWVjOeKf8M7e4pnJYM3yDZNxQBWEEY= +github.com/ipld/go-ipld-prime v0.21.0 h1:n4JmcpOlPDIxBcY037SVfpd1G+Sj1nKZah0m6QH9C2E= +github.com/ipld/go-ipld-prime v0.21.0/go.mod h1:3RLqy//ERg/y5oShXXdx5YIp50cFGOanyMctpPjsvxQ= +github.com/jackpal/go-nat-pmp v1.0.2 h1:KzKSgb7qkJvOUTqYl9/Hg/me3pWgBmERKrTGD7BdWus= +github.com/jackpal/go-nat-pmp v1.0.2/go.mod h1:QPH045xvCAeXUZOxsnwmrtiCoxIr9eob+4orBN1SBKc= +github.com/jbenet/go-temp-err-catcher v0.1.0 h1:zpb3ZH6wIE8Shj2sKS+khgRvf7T7RABoLk/+KKHggpk= +github.com/jbenet/go-temp-err-catcher v0.1.0/go.mod h1:0kJRvmDZXNMIiJirNPEYfhpPwbGVtZVWC34vc5WLsDk= +github.com/kisielk/errcheck v1.5.0/go.mod h1:pFxgyoBC7bSaBwPgfKdkLd5X25qrDl4LWUI2bnpBCr8= +github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck= +github.com/klauspost/compress v1.18.0 h1:c/Cqfb0r+Yi+JtIEq73FWXVkRonBlf0CRNYc8Zttxdo= +github.com/klauspost/compress v1.18.0/go.mod h1:2Pp+KzxcywXVXMr50+X0Q/Lsb43OQHYWRCY2AiWywWQ= +github.com/klauspost/cpuid/v2 v2.3.0 h1:S4CRMLnYUhGeDFDqkGriYKdfoFlDnMtqTiI/sFzhA9Y= +github.com/klauspost/cpuid/v2 v2.3.0/go.mod h1:hqwkgyIinND0mEev00jJYCxPNVRVXFQeu1XKlok6oO0= +github.com/koron/go-ssdp v0.0.6 h1:Jb0h04599eq/CY7rB5YEqPS83HmRfHP2azkxMN2rFtU= +github.com/koron/go-ssdp v0.0.6/go.mod h1:0R9LfRJGek1zWTjN3JUNlm5INCDYGpRDfAptnct63fI= +github.com/libp2p/go-buffer-pool v0.1.0 h1:oK4mSFcQz7cTQIfqbe4MIj9gLW+mnanjyFtc6cdF0Y8= +github.com/libp2p/go-buffer-pool v0.1.0/go.mod h1:N+vh8gMqimBzdKkSMVuydVDq+UV5QTWy5HSiZacSbPg= +github.com/libp2p/go-cidranger v1.1.0 h1:ewPN8EZ0dd1LSnrtuwd4709PXVcITVeuwbag38yPW7c= +github.com/libp2p/go-cidranger v1.1.0/go.mod h1:KWZTfSr+r9qEo9OkI9/SIEeAtw+NNoU0dXIXt15Okic= +github.com/libp2p/go-flow-metrics v0.3.0 h1:q31zcHUvHnwDO0SHaukewPYgwOBSxtt830uJtUx6784= +github.com/libp2p/go-flow-metrics v0.3.0/go.mod h1:nuhlreIwEguM1IvHAew3ij7A8BMlyHQJ279ao24eZZo= +github.com/libp2p/go-libp2p v0.43.0 h1:b2bg2cRNmY4HpLK8VHYQXLX2d3iND95OjodLFymvqXU= +github.com/libp2p/go-libp2p v0.43.0/go.mod h1:IiSqAXDyP2sWH+J2gs43pNmB/y4FOi2XQPbsb+8qvzc= +github.com/libp2p/go-libp2p-asn-util v0.4.1 h1:xqL7++IKD9TBFMgnLPZR6/6iYhawHKHl950SO9L6n94= +github.com/libp2p/go-libp2p-asn-util v0.4.1/go.mod h1:d/NI6XZ9qxw67b4e+NgpQexCIiFYJjErASrYW4PFDN8= +github.com/libp2p/go-libp2p-kad-dht v0.35.1 h1:RQglhc9OxqDwlFFdhQMwKxIPBIBfGsleROnK5hqVsoE= +github.com/libp2p/go-libp2p-kad-dht v0.35.1/go.mod h1:1oCXzkkBiYh3d5cMWLpInSOZ6am2AlpC4G+GDcZFcE0= +github.com/libp2p/go-libp2p-kbucket v0.8.0 h1:QAK7RzKJpYe+EuSEATAaaHYMYLkPDGC18m9jxPLnU8s= +github.com/libp2p/go-libp2p-kbucket v0.8.0/go.mod h1:JMlxqcEyKwO6ox716eyC0hmiduSWZZl6JY93mGaaqc4= +github.com/libp2p/go-libp2p-pubsub v0.15.0 h1:cG7Cng2BT82WttmPFMi50gDNV+58K626m/wR00vGL1o= +github.com/libp2p/go-libp2p-pubsub v0.15.0/go.mod h1:lr4oE8bFgQaifRcoc2uWhWWiK6tPdOEKpUuR408GFN4= +github.com/libp2p/go-libp2p-record v0.3.1 h1:cly48Xi5GjNw5Wq+7gmjfBiG9HCzQVkiZOUZ8kUl+Fg= +github.com/libp2p/go-libp2p-record v0.3.1/go.mod h1:T8itUkLcWQLCYMqtX7Th6r7SexyUJpIyPgks757td/E= +github.com/libp2p/go-libp2p-routing-helpers v0.7.5 h1:HdwZj9NKovMx0vqq6YNPTh6aaNzey5zHD7HeLJtq6fI= +github.com/libp2p/go-libp2p-routing-helpers v0.7.5/go.mod h1:3YaxrwP0OBPDD7my3D0KxfR89FlcX/IEbxDEDfAmj98= +github.com/libp2p/go-libp2p-testing v0.12.0 h1:EPvBb4kKMWO29qP4mZGyhVzUyR25dvfUIK5WDu6iPUA= +github.com/libp2p/go-libp2p-testing v0.12.0/go.mod h1:KcGDRXyN7sQCllucn1cOOS+Dmm7ujhfEyXQL5lvkcPg= +github.com/libp2p/go-msgio v0.3.0 h1:mf3Z8B1xcFN314sWX+2vOTShIE0Mmn2TXn3YCUQGNj0= +github.com/libp2p/go-msgio v0.3.0/go.mod h1:nyRM819GmVaF9LX3l03RMh10QdOroF++NBbxAb0mmDM= +github.com/libp2p/go-netroute v0.2.2 h1:Dejd8cQ47Qx2kRABg6lPwknU7+nBnFRpko45/fFPuZ8= +github.com/libp2p/go-netroute v0.2.2/go.mod h1:Rntq6jUAH0l9Gg17w5bFGhcC9a+vk4KNXs6s7IljKYE= +github.com/libp2p/go-reuseport v0.4.0 h1:nR5KU7hD0WxXCJbmw7r2rhRYruNRl2koHw8fQscQm2s= +github.com/libp2p/go-reuseport v0.4.0/go.mod h1:ZtI03j/wO5hZVDFo2jKywN6bYKWLOy8Se6DrI2E1cLU= +github.com/libp2p/go-yamux/v5 v5.0.1 h1:f0WoX/bEF2E8SbE4c/k1Mo+/9z0O4oC/hWEA+nfYRSg= +github.com/libp2p/go-yamux/v5 v5.0.1/go.mod h1:en+3cdX51U0ZslwRdRLrvQsdayFt3TSUKvBGErzpWbU= +github.com/marten-seemann/tcp v0.0.0-20210406111302-dfbc87cc63fd h1:br0buuQ854V8u83wA0rVZ8ttrq5CpaPZdvrK0LP2lOk= +github.com/marten-seemann/tcp v0.0.0-20210406111302-dfbc87cc63fd/go.mod h1:QuCEs1Nt24+FYQEqAAncTDPJIuGs+LxK1MCiFL25pMU= +github.com/mattn/go-colorable v0.1.13 h1:fFA4WZxdEF4tXPZVKMLwD8oUnCTTo08duU7wxecdEvA= +github.com/mattn/go-colorable v0.1.13/go.mod h1:7S9/ev0klgBDR4GtXTXX8a3vIGJpMovkB8vQcUbaXHg= +github.com/mattn/go-isatty v0.0.16/go.mod h1:kYGgaQfpe5nmfYZH+SKPsOc2e4SrIfOl2e/yFXSvRLM= +github.com/mattn/go-isatty v0.0.19/go.mod h1:W+V8PltTTMOvKvAeJH7IuucS94S2C6jfK/D7dTCTo3Y= +github.com/mattn/go-isatty v0.0.20 h1:xfD0iDuEKnDkl03q4limB+vH+GxLEtL/jb4xVJSWWEY= +github.com/mattn/go-isatty v0.0.20/go.mod h1:W+V8PltTTMOvKvAeJH7IuucS94S2C6jfK/D7dTCTo3Y= +github.com/miekg/dns v1.1.68 h1:jsSRkNozw7G/mnmXULynzMNIsgY2dHC8LO6U6Ij2JEA= +github.com/miekg/dns v1.1.68/go.mod h1:fujopn7TB3Pu3JM69XaawiU0wqjpL9/8xGop5UrTPps= +github.com/mikioh/tcpinfo v0.0.0-20190314235526-30a79bb1804b h1:z78hV3sbSMAUoyUMM0I83AUIT6Hu17AWfgjzIbtrYFc= +github.com/mikioh/tcpinfo v0.0.0-20190314235526-30a79bb1804b/go.mod h1:lxPUiZwKoFL8DUUmalo2yJJUCxbPKtm8OKfqr2/FTNU= +github.com/mikioh/tcpopt v0.0.0-20190314235656-172688c1accc h1:PTfri+PuQmWDqERdnNMiD9ZejrlswWrCpBEZgWOiTrc= +github.com/mikioh/tcpopt v0.0.0-20190314235656-172688c1accc/go.mod h1:cGKTAVKx4SxOuR/czcZ/E2RSJ3sfHs8FpHhQ5CWMf9s= +github.com/minio/blake2b-simd v0.0.0-20160723061019-3f5f724cb5b1/go.mod h1:pD8RvIylQ358TN4wwqatJ8rNavkEINozVn9DtGI3dfQ= +github.com/minio/sha256-simd v0.1.1-0.20190913151208-6de447530771/go.mod h1:B5e1o+1/KgNmWrSQK08Y6Z1Vb5pwIktudl0J58iy0KM= +github.com/minio/sha256-simd v1.0.1 h1:6kaan5IFmwTNynnKKpDHe6FWHohJOHhCPchzK49dzMM= +github.com/minio/sha256-simd v1.0.1/go.mod h1:Pz6AKMiUdngCLpeTL/RJY1M9rUuPMYujV5xJjtbRSN8= +github.com/mitchellh/mapstructure v1.5.0 h1:jeMsZIYE/09sWLaz43PL7Gy6RuMjD2eJVyuac5Z2hdY= +github.com/mitchellh/mapstructure v1.5.0/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo= +github.com/mr-tron/base58 v1.1.2/go.mod h1:BinMc/sQntlIE1frQmRFPUoPA1Zkr8VRgBdjWI2mNwc= +github.com/mr-tron/base58 v1.2.0 h1:T/HDJBh4ZCPbU39/+c3rRvE0uKBQlU27+QI8LJ4t64o= +github.com/mr-tron/base58 v1.2.0/go.mod h1:BinMc/sQntlIE1frQmRFPUoPA1Zkr8VRgBdjWI2mNwc= +github.com/multiformats/go-base32 v0.1.0 h1:pVx9xoSPqEIQG8o+UbAe7DNi51oej1NtK+aGkbLYxPE= +github.com/multiformats/go-base32 v0.1.0/go.mod h1:Kj3tFY6zNr+ABYMqeUNeGvkIC/UYgtWibDcT0rExnbI= +github.com/multiformats/go-base36 v0.2.0 h1:lFsAbNOGeKtuKozrtBsAkSVhv1p9D0/qedU9rQyccr0= +github.com/multiformats/go-base36 v0.2.0/go.mod h1:qvnKE++v+2MWCfePClUEjE78Z7P2a1UV0xHgWc0hkp4= +github.com/multiformats/go-multiaddr v0.1.1/go.mod h1:aMKBKNEYmzmDmxfX88/vz+J5IU55txyt0p4aiWVohjo= +github.com/multiformats/go-multiaddr v0.16.1 h1:fgJ0Pitow+wWXzN9do+1b8Pyjmo8m5WhGfzpL82MpCw= +github.com/multiformats/go-multiaddr v0.16.1/go.mod h1:JSVUmXDjsVFiW7RjIFMP7+Ev+h1DTbiJgVeTV/tcmP0= +github.com/multiformats/go-multiaddr-dns v0.4.1 h1:whi/uCLbDS3mSEUMb1MsoT4uzUeZB0N32yzufqS0i5M= +github.com/multiformats/go-multiaddr-dns v0.4.1/go.mod h1:7hfthtB4E4pQwirrz+J0CcDUfbWzTqEzVyYKKIKpgkc= +github.com/multiformats/go-multiaddr-fmt v0.1.0 h1:WLEFClPycPkp4fnIzoFoV9FVd49/eQsuaL3/CWe167E= +github.com/multiformats/go-multiaddr-fmt v0.1.0/go.mod h1:hGtDIW4PU4BqJ50gW2quDuPVjyWNZxToGUh/HwTZYJo= +github.com/multiformats/go-multibase v0.2.0 h1:isdYCVLvksgWlMW9OZRYJEa9pZETFivncJHmHnnd87g= +github.com/multiformats/go-multibase v0.2.0/go.mod h1:bFBZX4lKCA/2lyOFSAoKH5SS6oPyjtnzK/XTFDPkNuk= +github.com/multiformats/go-multicodec v0.9.2 h1:YrlXCuqxjqm3bXl+vBq5LKz5pz4mvAsugdqy78k0pXQ= +github.com/multiformats/go-multicodec v0.9.2/go.mod h1:LLWNMtyV5ithSBUo3vFIMaeDy+h3EbkMTek1m+Fybbo= +github.com/multiformats/go-multihash v0.0.8/go.mod h1:YSLudS+Pi8NHE7o6tb3D8vrpKa63epEDmG8nTduyAew= +github.com/multiformats/go-multihash v0.2.3 h1:7Lyc8XfX/IY2jWb/gI7JP+o7JEq9hOa7BFvVU9RSh+U= +github.com/multiformats/go-multihash v0.2.3/go.mod h1:dXgKXCXjBzdscBLk9JkjINiEsCKRVch90MdaGiKsvSM= +github.com/multiformats/go-multistream v0.6.1 h1:4aoX5v6T+yWmc2raBHsTvzmFhOI8WVOer28DeBBEYdQ= +github.com/multiformats/go-multistream v0.6.1/go.mod h1:ksQf6kqHAb6zIsyw7Zm+gAuVo57Qbq84E27YlYqavqw= +github.com/multiformats/go-varint v0.1.0 h1:i2wqFp4sdl3IcIxfAonHQV9qU5OsZ4Ts9IOoETFs5dI= +github.com/multiformats/go-varint v0.1.0/go.mod h1:5KVAVXegtfmNQQm/lCY+ATvDzvJJhSkUlGQV9wgObdI= +github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 h1:C3w9PqII01/Oq1c1nUAm88MOHcQC9l5mIlSMApZMrHA= +github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822/go.mod h1:+n7T8mK8HuQTcFwEeznm/DIxMOiR9yIdICNftLE1DvQ= +github.com/pbnjay/memory v0.0.0-20210728143218-7b4eea64cf58 h1:onHthvaw9LFnH4t2DcNVpwGmV9E1BkGknEliJkfwQj0= +github.com/pbnjay/memory v0.0.0-20210728143218-7b4eea64cf58/go.mod h1:DXv8WO4yhMYhSNPKjeNKa5WY9YCIEBRbNzFFPJbWO6Y= +github.com/pelletier/go-toml/v2 v2.2.4 h1:mye9XuhQ6gvn5h28+VilKrrPoQVanw5PMw/TB0t5Ec4= +github.com/pelletier/go-toml/v2 v2.2.4/go.mod h1:2gIqNv+qfxSVS7cM2xJQKtLSTLUE9V8t9Stt+h56mCY= +github.com/pion/datachannel v1.5.10 h1:ly0Q26K1i6ZkGf42W7D4hQYR90pZwzFOjTq5AuCKk4o= +github.com/pion/datachannel v1.5.10/go.mod h1:p/jJfC9arb29W7WrxyKbepTU20CFgyx5oLo8Rs4Py/M= +github.com/pion/dtls/v2 v2.2.12 h1:KP7H5/c1EiVAAKUmXyCzPiQe5+bCJrpOeKg/L05dunk= +github.com/pion/dtls/v2 v2.2.12/go.mod h1:d9SYc9fch0CqK90mRk1dC7AkzzpwJj6u2GU3u+9pqFE= +github.com/pion/dtls/v3 v3.0.6 h1:7Hkd8WhAJNbRgq9RgdNh1aaWlZlGpYTzdqjy9x9sK2E= +github.com/pion/dtls/v3 v3.0.6/go.mod h1:iJxNQ3Uhn1NZWOMWlLxEEHAN5yX7GyPvvKw04v9bzYU= +github.com/pion/ice/v4 v4.0.10 h1:P59w1iauC/wPk9PdY8Vjl4fOFL5B+USq1+xbDcN6gT4= +github.com/pion/ice/v4 v4.0.10/go.mod h1:y3M18aPhIxLlcO/4dn9X8LzLLSma84cx6emMSu14FGw= +github.com/pion/interceptor v0.1.40 h1:e0BjnPcGpr2CFQgKhrQisBU7V3GXK6wrfYrGYaU6Jq4= +github.com/pion/interceptor v0.1.40/go.mod h1:Z6kqH7M/FYirg3frjGJ21VLSRJGBXB/KqaTIrdqnOic= +github.com/pion/logging v0.2.3 h1:gHuf0zpoh1GW67Nr6Gj4cv5Z9ZscU7g/EaoC/Ke/igI= +github.com/pion/logging v0.2.3/go.mod h1:z8YfknkquMe1csOrxK5kc+5/ZPAzMxbKLX5aXpbpC90= +github.com/pion/mdns v0.0.12 h1:CiMYlY+O0azojWDmxdNr7ADGrnZ+V6Ilfner+6mSVK8= +github.com/pion/mdns/v2 v2.0.7 h1:c9kM8ewCgjslaAmicYMFQIde2H9/lrZpjBkN8VwoVtM= +github.com/pion/mdns/v2 v2.0.7/go.mod h1:vAdSYNAT0Jy3Ru0zl2YiW3Rm/fJCwIeM0nToenfOJKA= +github.com/pion/randutil v0.1.0 h1:CFG1UdESneORglEsnimhUjf33Rwjubwj6xfiOXBa3mA= +github.com/pion/randutil v0.1.0/go.mod h1:XcJrSMMbbMRhASFVOlj/5hQial/Y8oH/HVo7TBZq+j8= +github.com/pion/rtcp v1.2.15 h1:LZQi2JbdipLOj4eBjK4wlVoQWfrZbh3Q6eHtWtJBZBo= +github.com/pion/rtcp v1.2.15/go.mod h1:jlGuAjHMEXwMUHK78RgX0UmEJFV4zUKOFHR7OP+D3D0= +github.com/pion/rtp v1.8.19 h1:jhdO/3XhL/aKm/wARFVmvTfq0lC/CvN1xwYKmduly3c= +github.com/pion/rtp v1.8.19/go.mod h1:bAu2UFKScgzyFqvUKmbvzSdPr+NGbZtv6UB2hesqXBk= +github.com/pion/sctp v1.8.39 h1:PJma40vRHa3UTO3C4MyeJDQ+KIobVYRZQZ0Nt7SjQnE= +github.com/pion/sctp v1.8.39/go.mod h1:cNiLdchXra8fHQwmIoqw0MbLLMs+f7uQ+dGMG2gWebE= +github.com/pion/sdp/v3 v3.0.13 h1:uN3SS2b+QDZnWXgdr69SM8KB4EbcnPnPf2Laxhty/l4= +github.com/pion/sdp/v3 v3.0.13/go.mod h1:88GMahN5xnScv1hIMTqLdu/cOcUkj6a9ytbncwMCq2E= +github.com/pion/srtp/v3 v3.0.6 h1:E2gyj1f5X10sB/qILUGIkL4C2CqK269Xq167PbGCc/4= +github.com/pion/srtp/v3 v3.0.6/go.mod h1:BxvziG3v/armJHAaJ87euvkhHqWe9I7iiOy50K2QkhY= +github.com/pion/stun v0.6.1 h1:8lp6YejULeHBF8NmV8e2787BogQhduZugh5PdhDyyN4= +github.com/pion/stun v0.6.1/go.mod h1:/hO7APkX4hZKu/D0f2lHzNyvdkTGtIy3NDmLR7kSz/8= +github.com/pion/stun/v3 v3.0.0 h1:4h1gwhWLWuZWOJIJR9s2ferRO+W3zA/b6ijOI6mKzUw= +github.com/pion/stun/v3 v3.0.0/go.mod h1:HvCN8txt8mwi4FBvS3EmDghW6aQJ24T+y+1TKjB5jyU= +github.com/pion/transport/v2 v2.2.10 h1:ucLBLE8nuxiHfvkFKnkDQRYWYfp8ejf4YBOPfaQpw6Q= +github.com/pion/transport/v2 v2.2.10/go.mod h1:sq1kSLWs+cHW9E+2fJP95QudkzbK7wscs8yYgQToO5E= +github.com/pion/transport/v3 v3.0.7 h1:iRbMH05BzSNwhILHoBoAPxoB9xQgOaJk+591KC9P1o0= +github.com/pion/transport/v3 v3.0.7/go.mod h1:YleKiTZ4vqNxVwh77Z0zytYi7rXHl7j6uPLGhhz9rwo= +github.com/pion/turn/v4 v4.0.2 h1:ZqgQ3+MjP32ug30xAbD6Mn+/K4Sxi3SdNOTFf+7mpps= +github.com/pion/turn/v4 v4.0.2/go.mod h1:pMMKP/ieNAG/fN5cZiN4SDuyKsXtNTr0ccN7IToA1zs= +github.com/pion/webrtc/v4 v4.1.2 h1:mpuUo/EJ1zMNKGE79fAdYNFZBX790KE7kQQpLMjjR54= +github.com/pion/webrtc/v4 v4.1.2/go.mod h1:xsCXiNAmMEjIdFxAYU0MbB3RwRieJsegSB2JZsGN+8U= +github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4= +github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= +github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= +github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 h1:Jamvg5psRIccs7FGNTlIRMkT8wgtp5eCXdBlqhYGL6U= +github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= +github.com/polydawn/refmt v0.89.0 h1:ADJTApkvkeBZsN0tBTx8QjpD9JkmxbKp0cxfr9qszm4= +github.com/polydawn/refmt v0.89.0/go.mod h1:/zvteZs/GwLtCgZ4BL6CBsk9IKIlexP43ObX9AxTqTw= +github.com/prometheus/client_golang v1.23.2 h1:Je96obch5RDVy3FDMndoUsjAhG5Edi49h0RJWRi/o0o= +github.com/prometheus/client_golang v1.23.2/go.mod h1:Tb1a6LWHB3/SPIzCoaDXI4I8UHKeFTEQ1YCr+0Gyqmg= +github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= +github.com/prometheus/client_model v0.6.2 h1:oBsgwpGs7iVziMvrGhE53c/GrLUsZdHnqNwqPLxwZyk= +github.com/prometheus/client_model v0.6.2/go.mod h1:y3m2F6Gdpfy6Ut/GBsUqTWZqCUvMVzSfMLjcu6wAwpE= +github.com/prometheus/common v0.66.1 h1:h5E0h5/Y8niHc5DlaLlWLArTQI7tMrsfQjHV+d9ZoGs= +github.com/prometheus/common v0.66.1/go.mod h1:gcaUsgf3KfRSwHY4dIMXLPV0K/Wg1oZ8+SbZk/HH/dA= +github.com/prometheus/procfs v0.17.0 h1:FuLQ+05u4ZI+SS/w9+BWEM2TXiHKsUQ9TADiRH7DuK0= +github.com/prometheus/procfs v0.17.0/go.mod h1:oPQLaDAMRbA+u8H5Pbfq+dl3VDAvHxMUOVhe0wYB2zw= +github.com/quic-go/qpack v0.5.1 h1:giqksBPnT/HDtZ6VhtFKgoLOWmlyo9Ei6u9PqzIMbhI= +github.com/quic-go/qpack v0.5.1/go.mod h1:+PC4XFrEskIVkcLzpEkbLqq1uCoxPhQuvK5rH1ZgaEg= +github.com/quic-go/quic-go v0.54.1 h1:4ZAWm0AhCb6+hE+l5Q1NAL0iRn/ZrMwqHRGQiFwj2eg= +github.com/quic-go/quic-go v0.54.1/go.mod h1:e68ZEaCdyviluZmy44P6Iey98v/Wfz6HCjQEm+l8zTY= +github.com/quic-go/webtransport-go v0.9.0 h1:jgys+7/wm6JarGDrW+lD/r9BGqBAmqY/ssklE09bA70= +github.com/quic-go/webtransport-go v0.9.0/go.mod h1:4FUYIiUc75XSsF6HShcLeXXYZJ9AGwo/xh3L8M/P1ao= +github.com/rs/xid v1.6.0/go.mod h1:7XoLgs4eV+QndskICGsho+ADou8ySMSjJKDIan90Nz0= +github.com/rs/zerolog v1.34.0 h1:k43nTLIwcTVQAncfCw4KZ2VY6ukYoZaBPNOE8txlOeY= +github.com/rs/zerolog v1.34.0/go.mod h1:bJsvje4Z08ROH4Nhs5iH600c3IkWhwp44iRc54W6wYQ= +github.com/russross/blackfriday/v2 v2.1.0/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM= +github.com/sagikazarmark/locafero v0.11.0 h1:1iurJgmM9G3PA/I+wWYIOw/5SyBtxapeHDcg+AAIFXc= +github.com/sagikazarmark/locafero v0.11.0/go.mod h1:nVIGvgyzw595SUSUE6tvCp3YYTeHs15MvlmU87WwIik= +github.com/sourcegraph/conc v0.3.1-0.20240121214520-5f936abd7ae8 h1:+jumHNA0Wrelhe64i8F6HNlS8pkoyMv5sreGx2Ry5Rw= +github.com/sourcegraph/conc v0.3.1-0.20240121214520-5f936abd7ae8/go.mod h1:3n1Cwaq1E1/1lhQhtRK2ts/ZwZEhjcQeJQ1RuC6Q/8U= +github.com/spaolacci/murmur3 v1.1.0 h1:7c1g84S4BPRrfL5Xrdp6fOJ206sU9y293DDHaoy0bLI= +github.com/spaolacci/murmur3 v1.1.0/go.mod h1:JwIasOWyU6f++ZhiEuf87xNszmSA2myDM2Kzu9HwQUA= +github.com/spf13/afero v1.15.0 h1:b/YBCLWAJdFWJTN9cLhiXXcD7mzKn9Dm86dNnfyQw1I= +github.com/spf13/afero v1.15.0/go.mod h1:NC2ByUVxtQs4b3sIUphxK0NioZnmxgyCrfzeuq8lxMg= +github.com/spf13/cast v1.10.0 h1:h2x0u2shc1QuLHfxi+cTJvs30+ZAHOGRic8uyGTDWxY= +github.com/spf13/cast v1.10.0/go.mod h1:jNfB8QC9IA6ZuY2ZjDp0KtFO2LZZlg4S/7bzP6qqeHo= +github.com/spf13/cobra v1.10.1 h1:lJeBwCfmrnXthfAupyUTzJ/J4Nc1RsHC/mSRU2dll/s= +github.com/spf13/cobra v1.10.1/go.mod h1:7SmJGaTHFVBY0jW4NXGluQoLvhqFQM+6XSKD+P4XaB0= +github.com/spf13/pflag v1.0.9/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg= +github.com/spf13/pflag v1.0.10 h1:4EBh2KAYBwaONj6b2Ye1GiHfwjqyROoF4RwYO+vPwFk= +github.com/spf13/pflag v1.0.10/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg= +github.com/spf13/viper v1.21.0 h1:x5S+0EU27Lbphp4UKm1C+1oQO+rKx36vfCoaVebLFSU= +github.com/spf13/viper v1.21.0/go.mod h1:P0lhsswPGWD/1lZJ9ny3fYnVqxiegrlNrEmgLjbTCAY= +github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= +github.com/stretchr/objx v0.4.0/go.mod h1:YvHI0jy2hoMjB+UWwv71VJQ9isScKT/TqJzVSSt89Yw= +github.com/stretchr/objx v0.5.0/go.mod h1:Yh+to48EsGEfYuaHDzXPcE3xhTkx73EhmCGUpEOglKo= +github.com/stretchr/objx v0.5.2 h1:xuMeJ0Sdp5ZMRXx/aWO6RZxdr3beISkG5/G/aIRr3pY= +github.com/stretchr/objx v0.5.2/go.mod h1:FRsXN1f5AsAjCGJKqEizvkpNtU+EGNCLh3NxZ/8L+MA= +github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= +github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU= +github.com/stretchr/testify v1.8.1/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4= +github.com/stretchr/testify v1.11.1 h1:7s2iGBzp5EwR7/aIZr8ao5+dra3wiQyKjjFuvgVKu7U= +github.com/stretchr/testify v1.11.1/go.mod h1:wZwfW3scLgRK+23gO65QZefKpKQRnfz6sD981Nm4B6U= +github.com/subosito/gotenv v1.6.0 h1:9NlTDc1FTs4qu0DDq7AEtTPNw6SVm7uBMsUCUjABIf8= +github.com/subosito/gotenv v1.6.0/go.mod h1:Dk4QP5c2W3ibzajGcXpNraDfq2IrhjMIvMSWPKKo0FU= +github.com/whyrusleeping/go-keyspace v0.0.0-20160322163242-5b898ac5add1 h1:EKhdznlJHPMoKr0XTrX+IlJs1LH3lyx2nfr1dOlZ79k= +github.com/whyrusleeping/go-keyspace v0.0.0-20160322163242-5b898ac5add1/go.mod h1:8UvriyWtv5Q5EOgjHaSseUEdkQfvwFv1I/In/O2M9gc= +github.com/wlynxg/anet v0.0.5 h1:J3VJGi1gvo0JwZ/P1/Yc/8p63SoW98B5dHkYDmpgvvU= +github.com/wlynxg/anet v0.0.5/go.mod h1:eay5PRQr7fIVAMbTbchTnO9gG65Hg/uYGdc7mguHxoA= +github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= +github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= +go.opencensus.io v0.24.0 h1:y73uSU6J157QMP2kn2r30vwW1A2W2WFwSCGnAVxeaD0= +go.opencensus.io v0.24.0/go.mod h1:vNK8G9p7aAivkbmorf4v+7Hgx+Zs0yY+0fOtgBfjQKo= +go.opentelemetry.io/auto/sdk v1.2.1 h1:jXsnJ4Lmnqd11kwkBV2LgLoFMZKizbCi5fNZ/ipaZ64= +go.opentelemetry.io/auto/sdk v1.2.1/go.mod h1:KRTj+aOaElaLi+wW1kO/DZRXwkF4C5xPbEe3ZiIhN7Y= +go.opentelemetry.io/otel v1.38.0 h1:RkfdswUDRimDg0m2Az18RKOsnI8UDzppJAtj01/Ymk8= +go.opentelemetry.io/otel v1.38.0/go.mod h1:zcmtmQ1+YmQM9wrNsTGV/q/uyusom3P8RxwExxkZhjM= +go.opentelemetry.io/otel/metric v1.38.0 h1:Kl6lzIYGAh5M159u9NgiRkmoMKjvbsKtYRwgfrA6WpA= +go.opentelemetry.io/otel/metric v1.38.0/go.mod h1:kB5n/QoRM8YwmUahxvI3bO34eVtQf2i4utNVLr9gEmI= +go.opentelemetry.io/otel/trace v1.38.0 h1:Fxk5bKrDZJUH+AMyyIXGcFAPah0oRcT+LuNtJrmcNLE= +go.opentelemetry.io/otel/trace v1.38.0/go.mod h1:j1P9ivuFsTceSWe1oY+EeW3sc+Pp42sO++GHkg4wwhs= +go.uber.org/dig v1.19.0 h1:BACLhebsYdpQ7IROQ1AGPjrXcP5dF80U3gKoFzbaq/4= +go.uber.org/dig v1.19.0/go.mod h1:Us0rSJiThwCv2GteUN0Q7OKvU7n5J4dxZ9JKUXozFdE= +go.uber.org/fx v1.24.0 h1:wE8mruvpg2kiiL1Vqd0CC+tr0/24XIB10Iwp2lLWzkg= +go.uber.org/fx v1.24.0/go.mod h1:AmDeGyS+ZARGKM4tlH4FY2Jr63VjbEDJHtqXTGP5hbo= +go.uber.org/goleak v1.3.0 h1:2K3zAYmnTNqV73imy9J1T3WC+gmCePx2hEGkimedGto= +go.uber.org/goleak v1.3.0/go.mod h1:CoHD4mav9JJNrW/WLlf7HGZPjdw8EucARQHekz1X6bE= +go.uber.org/mock v0.5.2 h1:LbtPTcP8A5k9WPXj54PPPbjcI4Y6lhyOZXn+VS7wNko= +go.uber.org/mock v0.5.2/go.mod h1:wLlUxC2vVTPTaE3UD51E0BGOAElKrILxhVSDYQLld5o= +go.uber.org/multierr v1.11.0 h1:blXXJkSxSSfBVBlC76pxqeO+LN3aDfLQo+309xJstO0= +go.uber.org/multierr v1.11.0/go.mod h1:20+QtiLqy0Nd6FdQB9TLXag12DsQkrbs3htMFfDN80Y= +go.uber.org/zap v1.27.0 h1:aJMhYGrd5QSmlpLMr2MftRKl7t8J8PTZPA732ud/XR8= +go.uber.org/zap v1.27.0/go.mod h1:GB2qFLM7cTU87MWRP2mPIjqfIDnGu+VIO4V/SdhGo2E= +go.yaml.in/yaml/v2 v2.4.3 h1:6gvOSjQoTB3vt1l+CU+tSyi/HOjfOjRLJ4YwYZGwRO0= +go.yaml.in/yaml/v2 v2.4.3/go.mod h1:zSxWcmIDjOzPXpjlTTbAsKokqkDNAVtZO0WOMiT90s8= +go.yaml.in/yaml/v3 v3.0.4 h1:tfq32ie2Jv2UxXFdLJdh3jXuOzWiL1fo0bu/FbuKpbc= +go.yaml.in/yaml/v3 v3.0.4/go.mod h1:DhzuOOF2ATzADvBadXxruRBLzYTpT36CKvDb3+aBEFg= +golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= +golang.org/x/crypto v0.0.0-20190611184440-5c40567a22f8/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= +golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= +golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= +golang.org/x/crypto v0.43.0 h1:dduJYIi3A3KOfdGOHX8AVZ/jGiyPa3IbBozJ5kNuE04= +golang.org/x/crypto v0.43.0/go.mod h1:BFbav4mRNlXJL4wNeejLpWxB7wMbc79PdRGhWKncxR0= +golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= +golang.org/x/exp v0.0.0-20250911091902-df9299821621 h1:2id6c1/gto0kaHYyrixvknJ8tUK/Qs5IsmBtrc+FtgU= +golang.org/x/exp v0.0.0-20250911091902-df9299821621/go.mod h1:TwQYMMnGpvZyc+JpB/UAuTNIsVJifOlSkrZkhcvpVUk= +golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= +golang.org/x/lint v0.0.0-20190227174305-5b3e6a55c961/go.mod h1:wehouNa3lNwaWXcvxsM5YxQ5yQlVC4a0KAMCusXpPoU= +golang.org/x/lint v0.0.0-20190313153728-d0100b6bd8b3/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= +golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= +golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= +golang.org/x/mod v0.28.0 h1:gQBtGhjxykdjY9YhZpSlZIsbnaE2+PgjfLWUQTnoZ1U= +golang.org/x/mod v0.28.0/go.mod h1:yfB/L0NOf/kmEbXjzCPOx1iK1fRutOydrCMsqRhEBxI= +golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20190213061140-3a22650c66bd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= +golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= +golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20200226121028-0de0cce0169b/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= +golang.org/x/net v0.0.0-20201110031124-69a78807bb2b/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= +golang.org/x/net v0.46.0 h1:giFlY12I07fugqwPuWJi68oOnpfqFnJIJzaIIm2JVV4= +golang.org/x/net v0.46.0/go.mod h1:Q9BGdFy1y4nkUwiLvT5qtyhAnEHgnQ/zd8PfU6nc210= +golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= +golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.17.0 h1:l60nONMj9l5drqw6jlhIELNv9I0A4OFgRsG9k2oT9Ug= +golang.org/x/sync v0.17.0/go.mod h1:9KTHXmSnoGruLpwFjVSX0lNNA75CykiMECbovNTZqGI= +golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20220811171246-fbc7d0a398ab/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.6.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.12.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.37.0 h1:fdNQudmxPjkdUTPnLn5mdQv7Zwvbvpaxqs831goi9kQ= +golang.org/x/sys v0.37.0/go.mod h1:OgkHotnGiDImocRcuBABYBEXf8A9a87e/uXjp9XT3ks= +golang.org/x/telemetry v0.0.0-20250908211612-aef8a434d053 h1:dHQOQddU4YHS5gY33/6klKjq7Gp3WwMyOXGNp5nzRj8= +golang.org/x/telemetry v0.0.0-20250908211612-aef8a434d053/go.mod h1:+nZKN+XVh4LCiA9DV3ywrzN4gumyCnKjau3NGb9SGoE= +golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= +golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= +golang.org/x/text v0.30.0 h1:yznKA/E9zq54KzlzBEAWn1NXSQ8DIp/NYMy88xJjl4k= +golang.org/x/text v0.30.0/go.mod h1:yDdHFIX9t+tORqspjENWgzaCVXgk0yYnYuSZ8UzzBVM= +golang.org/x/time v0.12.0 h1:ScB/8o8olJvc+CQPWrK3fPZNfh7qgwCrY0zJmoEQLSE= +golang.org/x/time v0.12.0/go.mod h1:CDIdPxbZBQxdj6cxyCIdrNogrJKMJ7pr37NYpMcMDSg= +golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= +golang.org/x/tools v0.0.0-20190114222345-bf090417da8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= +golang.org/x/tools v0.0.0-20190226205152-f727befe758c/go.mod h1:9Yl7xja0Znq3iFh3HoIrodX9oNMXvdceNzlUR8zjMvY= +golang.org/x/tools v0.0.0-20190311212946-11955173bddd/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= +golang.org/x/tools v0.0.0-20190524140312-2c0ae7006135/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= +golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20200619180055-7c47624df98f/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= +golang.org/x/tools v0.0.0-20210106214847-113979e3529a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= +golang.org/x/tools v0.37.0 h1:DVSRzp7FwePZW356yEAChSdNcQo6Nsp+fex1SUW09lE= +golang.org/x/tools v0.37.0/go.mod h1:MBN5QPQtLMHVdvsbtarmTNukZDdgwdwlO5qGacAzF0w= +golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +golang.org/x/xerrors v0.0.0-20240903120638-7835f813f4da h1:noIWHXmPHxILtqtCOPIhSt0ABwskkZKjD3bXGnZGpNY= +golang.org/x/xerrors v0.0.0-20240903120638-7835f813f4da/go.mod h1:NDW/Ps6MPRej6fsCIbMTohpP40sJ/P/vI1MoTEGwX90= +gonum.org/v1/gonum v0.16.0 h1:5+ul4Swaf3ESvrOnidPp4GZbzf0mxVQpDCYUQE7OJfk= +gonum.org/v1/gonum v0.16.0/go.mod h1:fef3am4MQ93R2HHpKnLk4/Tbh/s0+wqD5nfa6Pnwy4E= +google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM= +google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= +google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc= +google.golang.org/genproto v0.0.0-20190819201941-24fa4b261c55/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc= +google.golang.org/genproto v0.0.0-20200526211855-cb27e3aa2013/go.mod h1:NbSheEEYHJ7i3ixzK3sjbqSGDJWnxyFXZblF3eUsNvo= +google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c= +google.golang.org/grpc v1.23.0/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg= +google.golang.org/grpc v1.25.1/go.mod h1:c3i+UQWmh7LiEpx4sFZnkU36qjEYZ0imhYfXVyQciAY= +google.golang.org/grpc v1.27.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk= +google.golang.org/grpc v1.33.2/go.mod h1:JMHMWHQWaTccqQQlmk3MJZS+GWXOdAesneDmEnv2fbc= +google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8= +google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0= +google.golang.org/protobuf v0.0.0-20200228230310-ab0ca4ff8a60/go.mod h1:cfTl7dwQJ+fmap5saPgwCLgHXTUD7jkjRqWcaiX5VyM= +google.golang.org/protobuf v1.20.1-0.20200309200217-e05f789c0967/go.mod h1:A+miEFZTKqfCUM6K7xSMQL9OKL/b6hQv+e19PK+JZNE= +google.golang.org/protobuf v1.21.0/go.mod h1:47Nbq4nVaFHyn7ilMalzfO3qCViNmqZ2kzikPIcrTAo= +google.golang.org/protobuf v1.22.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= +google.golang.org/protobuf v1.23.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= +google.golang.org/protobuf v1.23.1-0.20200526195155-81db48ad09cc/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= +google.golang.org/protobuf v1.25.0/go.mod h1:9JNX74DMeImyA3h4bdi1ymwjUzf21/xIlbajtzgsN7c= +google.golang.org/protobuf v1.36.10 h1:AYd7cD/uASjIL6Q9LiTjz8JLcrh/88q5UObnmY3aOOE= +google.golang.org/protobuf v1.36.10/go.mod h1:HTf+CrKn2C3g5S8VImy6tdcUvCska2kB7j23XfzDpco= +gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= +gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA= +gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= +honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= +honnef.co/go/tools v0.0.0-20190523083050-ea95bdfd59fc/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= +lukechampine.com/blake3 v1.4.1 h1:I3Smz7gso8w4/TunLKec6K2fn+kyKtDxr/xcQEN84Wg= +lukechampine.com/blake3 v1.4.1/go.mod h1:QFosUxmjB8mnrWFSNwKmvxHpfY72bmD2tQ0kBMM3kwo= diff --git a/tools/da-debug/main.go b/tools/da-debug/main.go new file mode 100644 index 0000000000..9568f49a48 --- /dev/null +++ b/tools/da-debug/main.go @@ -0,0 +1,563 @@ +package main + +import ( + "context" + "encoding/hex" + "fmt" + "os" + "strconv" + "strings" + "time" + + "github.com/rs/zerolog" + "github.com/spf13/cobra" + "google.golang.org/protobuf/proto" + + "github.com/evstack/ev-node/da" + "github.com/evstack/ev-node/da/celestia" + "github.com/evstack/ev-node/types" + pb "github.com/evstack/ev-node/types/pb/evnode/v1" +) + +var ( + daURL string + authToken string + timeout time.Duration + verbose bool + maxBlobSize uint64 + filterHeight uint64 +) + +func main() { + rootCmd := &cobra.Command{ + Use: "da-debug", + Short: "DA debugging tool for blockchain data inspection", + Long: `DA Debug Tool +A powerful DA debugging tool for inspecting blockchain data availability layers. +Connects directly to Celestia's blob API.`, + } + + // Global flags + rootCmd.PersistentFlags().StringVar(&daURL, "da-url", "http://localhost:26658", "Celestia node RPC URL") + rootCmd.PersistentFlags().StringVar(&authToken, "auth-token", "", "Authentication token for Celestia node") + rootCmd.PersistentFlags().DurationVar(&timeout, "timeout", 30*time.Second, "Request timeout") + rootCmd.PersistentFlags().BoolVar(&verbose, "verbose", false, "Enable verbose logging") + rootCmd.PersistentFlags().Uint64Var(&maxBlobSize, "max-blob-size", 1970176, "Maximum blob size in bytes") + + // Add subcommands + rootCmd.AddCommand(queryCmd()) + rootCmd.AddCommand(searchCmd()) + + if err := rootCmd.Execute(); err != nil { + printError("Error: %v\n", err) + os.Exit(1) + } +} + +func queryCmd() *cobra.Command { + cmd := &cobra.Command{ + Use: "query ", + Short: "Query and decode blobs at a specific DA height and namespace", + Long: `Query and decode blobs at a specific DA height and namespace. +Decodes each blob as either header or data and displays detailed information.`, + Args: cobra.ExactArgs(2), + RunE: runQuery, + } + + cmd.Flags().Uint64Var(&filterHeight, "filter-height", 0, "Filter blobs by specific height (0 = no filter)") + + return cmd +} + +func searchCmd() *cobra.Command { + var searchHeight uint64 + var searchRange uint64 + + cmd := &cobra.Command{ + Use: "search --target-height ", + Short: "Search for blobs containing a specific blockchain height", + Long: `Search through multiple DA heights to find blobs containing data from a specific blockchain height. +Starting from the given DA height, searches through a range of DA heights until it finds matching blobs.`, + Args: cobra.ExactArgs(2), + RunE: func(cmd *cobra.Command, args []string) error { + return runSearch(cmd, args, searchHeight, searchRange) + }, + } + + cmd.Flags().Uint64Var(&searchHeight, "target-height", 0, "Target blockchain height to search for (required)") + cmd.Flags().Uint64Var(&searchRange, "range", 10, "Number of DA heights to search") + _ = cmd.MarkFlagRequired("target-height") + + return cmd +} + +func runQuery(cmd *cobra.Command, args []string) error { + height, err := strconv.ParseUint(args[0], 10, 64) + if err != nil { + return fmt.Errorf("invalid height: %w", err) + } + + namespace, err := parseNamespace(args[1]) + if err != nil { + return fmt.Errorf("invalid namespace: %w", err) + } + + printBanner() + printQueryInfo(height, namespace) + + client, err := createDAClient() + if err != nil { + return err + } + defer client.Close() + + ctx, cancel := context.WithTimeout(context.Background(), timeout) + defer cancel() + + return queryHeight(ctx, client, height, namespace) +} + +func runSearch(cmd *cobra.Command, args []string, searchHeight, searchRange uint64) error { + startHeight, err := strconv.ParseUint(args[0], 10, 64) + if err != nil { + return fmt.Errorf("invalid start height: %w", err) + } + + namespace, err := parseNamespace(args[1]) + if err != nil { + return fmt.Errorf("invalid namespace: %w", err) + } + + printBanner() + printSearchInfo(startHeight, namespace, searchHeight, searchRange) + + client, err := createDAClient() + if err != nil { + return err + } + defer client.Close() + + ctx, cancel := context.WithTimeout(context.Background(), timeout) + defer cancel() + + return searchForHeight(ctx, client, startHeight, namespace, searchHeight, searchRange) +} + +func searchForHeight(ctx context.Context, client *celestia.Client, startHeight uint64, namespace []byte, targetHeight, searchRange uint64) error { + fmt.Printf("Searching for height %d in DA heights %d-%d...\n", targetHeight, startHeight, startHeight+searchRange-1) + fmt.Println() + + foundBlobs := 0 + for daHeight := startHeight; daHeight < startHeight+searchRange; daHeight++ { + result, err := client.GetIDs(ctx, daHeight, namespace) + if err != nil { + if err.Error() == "blob: not found" || strings.Contains(err.Error(), "blob: not found") { + continue + } + if strings.Contains(err.Error(), "height") && strings.Contains(err.Error(), "future") { + fmt.Printf("Reached future height at DA height %d\n", daHeight) + break + } + continue + } + + if result == nil || len(result.IDs) == 0 { + continue + } + + // Get the actual blob data + blobs, err := client.Get(ctx, result.IDs, namespace) + if err != nil { + continue + } + + // Check each blob for the target height + for i, blob := range blobs { + found := false + var blobHeight uint64 + + // Try to decode as header first + if header := tryDecodeHeader(blob); header != nil { + blobHeight = header.Height() + if blobHeight == targetHeight { + found = true + } + } else if data := tryDecodeData(blob); data != nil { + if data.Metadata != nil { + blobHeight = data.Height() + if blobHeight == targetHeight { + found = true + } + } + } + + if found { + foundBlobs++ + fmt.Printf("FOUND at DA Height %d - BLOB %d\n", daHeight, foundBlobs) + fmt.Println(strings.Repeat("-", 80)) + displayBlobInfo(result.IDs[i], blob) + + // Display the decoded content + if header := tryDecodeHeader(blob); header != nil { + printTypeHeader("SignedHeader") + displayHeader(header) + } else if data := tryDecodeData(blob); data != nil { + printTypeHeader("SignedData") + displayData(data) + } + + fmt.Println() + } + } + } + + fmt.Println(strings.Repeat("=", 50)) + if foundBlobs == 0 { + fmt.Printf("No blobs found containing height %d in DA range %d-%d\n", targetHeight, startHeight, startHeight+searchRange-1) + } else { + fmt.Printf("Found %d blob(s) containing height %d\n", foundBlobs, targetHeight) + } + + return nil +} + +func queryHeight(ctx context.Context, client *celestia.Client, height uint64, namespace []byte) error { + result, err := client.GetIDs(ctx, height, namespace) + if err != nil { + // Handle "blob not found" as a normal case + if err.Error() == "blob: not found" || strings.Contains(err.Error(), "blob: not found") { + fmt.Printf("No blobs found at height %d\n", height) + return nil + } + // Handle future height errors gracefully + if strings.Contains(err.Error(), "height") && strings.Contains(err.Error(), "future") { + fmt.Printf("Height %d is in the future (not yet available)\n", height) + return nil + } + return fmt.Errorf("failed to get IDs: %w", err) + } + + if result == nil || len(result.IDs) == 0 { + fmt.Printf("No blobs found at height %d\n", height) + return nil + } + + fmt.Printf("Found %d blob(s) at height %d\n", len(result.IDs), height) + fmt.Printf("Timestamp: %s\n", result.Timestamp.Format(time.RFC3339)) + fmt.Println() + + // Get the actual blob data + blobs, err := client.Get(ctx, result.IDs, namespace) + if err != nil { + return fmt.Errorf("failed to get blob data: %w", err) + } + + // Process each blob with optional height filtering + displayedBlobs := 0 + for i, blob := range blobs { + shouldDisplay := true + var blobHeight uint64 + + // Check if we need to filter by height + if filterHeight > 0 { + shouldDisplay = false + + // Try to decode as header first to check height + if header := tryDecodeHeader(blob); header != nil { + blobHeight = header.Height() + if blobHeight == filterHeight { + shouldDisplay = true + } + } else if data := tryDecodeData(blob); data != nil { + if data.Metadata != nil { + blobHeight = data.Height() + if blobHeight == filterHeight { + shouldDisplay = true + } + } + } + } + + if !shouldDisplay { + continue + } + + displayedBlobs++ + printBlobHeader(displayedBlobs, -1) // -1 indicates filtered mode + displayBlobInfo(result.IDs[i], blob) + + // Try to decode as header first + if header := tryDecodeHeader(blob); header != nil { + printTypeHeader("SignedHeader") + displayHeader(header) + } else if data := tryDecodeData(blob); data != nil { + printTypeHeader("SignedData") + displayData(data) + } else { + printTypeHeader("Raw Data") + displayRawData(blob) + } + + if displayedBlobs > 1 { + printSeparator() + } + } + + // Show filter results + if filterHeight > 0 { + if displayedBlobs == 0 { + fmt.Printf("No blobs found matching height filter: %d\n", filterHeight) + } else { + fmt.Printf("Showing %d blob(s) matching height filter: %d\n", displayedBlobs, filterHeight) + } + } + + printFooter() + return nil +} + +func printBanner() { + fmt.Println("DA Debug Tool - Blockchain Data Inspector") + fmt.Println(strings.Repeat("=", 50)) +} + +func printQueryInfo(height uint64, namespace []byte) { + fmt.Printf("DA Height: %d | Namespace: %s | URL: %s", height, formatHash(hex.EncodeToString(namespace)), daURL) + if filterHeight > 0 { + fmt.Printf(" | Filter Height: %d", filterHeight) + } + fmt.Println() + fmt.Println() +} + +func printSearchInfo(startHeight uint64, namespace []byte, targetHeight, searchRange uint64) { + fmt.Printf("Start DA Height: %d | Namespace: %s | URL: %s", startHeight, formatHash(hex.EncodeToString(namespace)), daURL) + fmt.Printf(" | Target Height: %d | Range: %d", targetHeight, searchRange) + fmt.Println() + fmt.Println() +} + +func printBlobHeader(current, total int) { + if total == -1 { + fmt.Printf("BLOB %d\n", current) + } else { + fmt.Printf("BLOB %d/%d\n", current, total) + } + fmt.Println(strings.Repeat("-", 80)) +} + +func displayBlobInfo(id da.ID, blob []byte) { + fmt.Printf("ID: %s\n", formatHash(hex.EncodeToString(id))) + fmt.Printf("Size: %s\n", formatSize(len(blob))) + + // Try to parse the ID to show height and commitment + if idHeight, commitment, err := da.SplitID(id); err == nil { + fmt.Printf("ID Height: %d\n", idHeight) + fmt.Printf("Commitment: %s\n", formatHash(hex.EncodeToString(commitment))) + } +} + +func printTypeHeader(title string) { + fmt.Printf("Type: %s\n", title) +} + +func displayHeader(header *types.SignedHeader) { + fmt.Printf("Height: %d\n", header.Height()) + fmt.Printf("Time: %s\n", header.Time().Format(time.RFC3339)) + fmt.Printf("Chain ID: %s\n", header.ChainID()) + fmt.Printf("Version: Block=%d, App=%d\n", header.Version.Block, header.Version.App) + fmt.Printf("Last Header: %s\n", formatHashField(hex.EncodeToString(header.LastHeaderHash[:]))) + fmt.Printf("Data Hash: %s\n", formatHashField(hex.EncodeToString(header.DataHash[:]))) + fmt.Printf("Validator: %s\n", formatHashField(hex.EncodeToString(header.ValidatorHash[:]))) + fmt.Printf("Proposer: %s\n", formatHashField(hex.EncodeToString(header.ProposerAddress))) + fmt.Printf("Signature: %s\n", formatHashField(hex.EncodeToString(header.Signature))) + if len(header.Signer.Address) > 0 { + fmt.Printf("Signer: %s\n", formatHashField(hex.EncodeToString(header.Signer.Address))) + } +} + +func displayData(data *types.SignedData) { + if data.Metadata != nil { + fmt.Printf("Chain ID: %s\n", data.ChainID()) + fmt.Printf("Height: %d\n", data.Height()) + fmt.Printf("Time: %s\n", data.Time().Format(time.RFC3339)) + fmt.Printf("Last Data: %s\n", formatHashField(hex.EncodeToString(data.LastDataHash[:]))) + } + + dataHash := data.DACommitment() + fmt.Printf("DA Commit: %s\n", formatHashField(hex.EncodeToString(dataHash[:]))) + fmt.Printf("TX Count: %d\n", len(data.Txs)) + fmt.Printf("Signature: %s\n", formatHashField(hex.EncodeToString(data.Signature))) + + if len(data.Signer.Address) > 0 { + fmt.Printf("Signer: %s\n", formatHashField(hex.EncodeToString(data.Signer.Address))) + } + + // Display transactions + if len(data.Txs) > 0 { + fmt.Printf("\nTransactions:\n") + for i, tx := range data.Txs { + fmt.Printf(" [%d] Size: %s, Hash: %s\n", + i+1, + formatSize(len(tx)), + formatShortHash(hex.EncodeToString(tx))) + + if isPrintable(tx) && len(tx) < 200 { + preview := string(tx) + if len(preview) > 60 { + preview = preview[:60] + "..." + } + fmt.Printf(" Data: %s\n", preview) + } + } + } +} + +func displayRawData(blob []byte) { + hexStr := hex.EncodeToString(blob) + if len(hexStr) > 120 { + fmt.Printf("Hex: %s...\n", hexStr[:120]) + fmt.Printf("Full Length: %s\n", formatSize(len(blob))) + } else { + fmt.Printf("Hex: %s\n", hexStr) + } + + if isPrintable(blob) { + strData := string(blob) + if len(strData) > 200 { + fmt.Printf("String: %s...\n", strData[:200]) + } else { + fmt.Printf("String: %s\n", strData) + } + } else { + fmt.Printf("String: (Binary data - not printable)\n") + } +} + +// Helper functions for formatting + +func formatHash(hash string) string { + return hash +} + +func formatHashField(hash string) string { + return hash +} + +func formatShortHash(hash string) string { + if len(hash) > 16 { + return hash[:16] + "..." + } + return hash +} + +func formatSize(bytes int) string { + if bytes < 1024 { + return fmt.Sprintf("%d B", bytes) + } else if bytes < 1024*1024 { + return fmt.Sprintf("%.1f KB", float64(bytes)/1024) + } else { + return fmt.Sprintf("%.1f MB", float64(bytes)/(1024*1024)) + } +} + +func printSeparator() { + fmt.Println() +} + +func printFooter() { + fmt.Println(strings.Repeat("=", 50)) + fmt.Printf("Analysis complete!\n") +} + +func printError(format string, args ...interface{}) { + fmt.Fprintf(os.Stderr, "Error: "+format, args...) +} + +func tryDecodeHeader(bz []byte) *types.SignedHeader { + header := new(types.SignedHeader) + var headerPb pb.SignedHeader + + if err := proto.Unmarshal(bz, &headerPb); err != nil { + return nil + } + + if err := header.FromProto(&headerPb); err != nil { + return nil + } + + // Basic validation + if err := header.Header.ValidateBasic(); err != nil { + return nil + } + + return header +} + +func tryDecodeData(bz []byte) *types.SignedData { + var signedData types.SignedData + if err := signedData.UnmarshalBinary(bz); err != nil { + return nil + } + + // Skip completely empty data + if len(signedData.Txs) == 0 && len(signedData.Signature) == 0 { + return nil + } + + return &signedData +} + +func createDAClient() (*celestia.Client, error) { + logger := zerolog.New(zerolog.ConsoleWriter{Out: os.Stderr}).Level(zerolog.InfoLevel) + if verbose { + logger = logger.Level(zerolog.DebugLevel) + } else { + logger = zerolog.Nop() + } + + ctx, cancel := context.WithTimeout(context.Background(), timeout) + defer cancel() + + client, err := celestia.NewClient(ctx, logger, daURL, authToken, maxBlobSize) + if err != nil { + return nil, fmt.Errorf("failed to create Celestia client: %w", err) + } + + return client, nil +} + +func parseNamespace(ns string) ([]byte, error) { + // Try to parse as hex first + if hexBytes, err := parseHex(ns); err == nil && len(hexBytes) == da.NamespaceSize { + return hexBytes, nil + } + + // If not valid hex or not 29 bytes, treat as string identifier + namespace := da.NamespaceFromString(ns) + return namespace.Bytes(), nil +} + +func parseHex(s string) ([]byte, error) { + // Remove 0x prefix if present + if len(s) >= 2 && s[:2] == "0x" { + s = s[2:] + } + + return hex.DecodeString(s) +} + +func isPrintable(data []byte) bool { + if len(data) > 1000 { // Only check first 1000 bytes for performance + data = data[:1000] + } + + for _, b := range data { + if b < 32 || b > 126 { + if b != '\n' && b != '\r' && b != '\t' { + return false + } + } + } + return true +} diff --git a/tools/da-debug/main_test.go b/tools/da-debug/main_test.go new file mode 100644 index 0000000000..69342c90c6 --- /dev/null +++ b/tools/da-debug/main_test.go @@ -0,0 +1,216 @@ +package main + +import ( + "encoding/hex" + "testing" + + "github.com/evstack/ev-node/da" +) + +func TestParseNamespace(t *testing.T) { + tests := []struct { + name string + input string + expected int // expected length in bytes + wantErr bool + }{ + { + name: "valid hex namespace with 0x prefix", + input: "0x000000000000000000000000000000000000000000000000000000746573743031", + expected: 29, + wantErr: false, + }, + { + name: "valid hex namespace without prefix", + input: "000000000000000000000000000000000000000000000000000000746573743031", + expected: 29, + wantErr: false, + }, + { + name: "string identifier", + input: "test-namespace", + expected: 29, + wantErr: false, + }, + { + name: "empty string", + input: "", + expected: 29, + wantErr: false, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + result, err := parseNamespace(tt.input) + + if tt.wantErr && err == nil { + t.Errorf("parseNamespace() expected error, got nil") + } + if !tt.wantErr && err != nil { + t.Errorf("parseNamespace() unexpected error: %v", err) + } + + if len(result) != tt.expected { + t.Errorf("parseNamespace() result length = %d, expected %d", len(result), tt.expected) + } + }) + } +} + +func TestTryDecodeHeader(t *testing.T) { + // Test with invalid data + result := tryDecodeHeader([]byte("invalid")) + if result != nil { + t.Errorf("tryDecodeHeader() with invalid data should return nil") + } + + // Test with empty data + result = tryDecodeHeader([]byte{}) + if result != nil { + t.Errorf("tryDecodeHeader() with empty data should return nil") + } +} + +func TestTryDecodeData(t *testing.T) { + // Test with invalid data + result := tryDecodeData([]byte("invalid")) + if result != nil { + t.Errorf("tryDecodeData() with invalid data should return nil") + } + + // Test with empty data + result = tryDecodeData([]byte{}) + if result != nil { + t.Errorf("tryDecodeData() with empty data should return nil") + } +} + +func TestParseHex(t *testing.T) { + tests := []struct { + name string + input string + expected string + wantErr bool + }{ + { + name: "with 0x prefix", + input: "0xdeadbeef", + expected: "deadbeef", + wantErr: false, + }, + { + name: "without prefix", + input: "deadbeef", + expected: "deadbeef", + wantErr: false, + }, + { + name: "invalid hex", + input: "xyz123", + wantErr: true, + }, + { + name: "empty string", + input: "", + expected: "", + wantErr: false, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + result, err := parseHex(tt.input) + + if tt.wantErr && err == nil { + t.Errorf("parseHex() expected error, got nil") + } + if !tt.wantErr && err != nil { + t.Errorf("parseHex() unexpected error: %v", err) + } + + if !tt.wantErr { + resultHex := hex.EncodeToString(result) + if resultHex != tt.expected { + t.Errorf("parseHex() result = %s, expected %s", resultHex, tt.expected) + } + } + }) + } +} + +func TestIsPrintable(t *testing.T) { + tests := []struct { + name string + input []byte + expected bool + }{ + { + name: "printable ASCII", + input: []byte("Hello, World!"), + expected: true, + }, + { + name: "with newlines and tabs", + input: []byte("Hello\nWorld\t!"), + expected: true, + }, + { + name: "binary data", + input: []byte{0x00, 0x01, 0x02, 0xFF}, + expected: false, + }, + { + name: "mixed printable and non-printable", + input: []byte("Hello\x00World"), + expected: false, + }, + { + name: "empty", + input: []byte{}, + expected: true, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + result := isPrintable(tt.input) + if result != tt.expected { + t.Errorf("isPrintable() = %v, expected %v", result, tt.expected) + } + }) + } +} + +func TestIDSplitting(t *testing.T) { + // Test with a mock ID that follows the expected format + height := uint64(12345) + commitment := []byte("test-commitment-data") + + // Create an ID using the format from the da.go implementation + id := make([]byte, 8+len(commitment)) + // Use little endian as per the da.go implementation + id[0] = byte(height) + id[1] = byte(height >> 8) + id[2] = byte(height >> 16) + id[3] = byte(height >> 24) + id[4] = byte(height >> 32) + id[5] = byte(height >> 40) + id[6] = byte(height >> 48) + id[7] = byte(height >> 56) + copy(id[8:], commitment) + + // Test splitting + parsedHeight, parsedCommitment, err := da.SplitID(id) + if err != nil { + t.Errorf("SplitID() unexpected error: %v", err) + } + + if parsedHeight != height { + t.Errorf("SplitID() height = %d, expected %d", parsedHeight, height) + } + + if string(parsedCommitment) != string(commitment) { + t.Errorf("SplitID() commitment = %s, expected %s", string(parsedCommitment), string(commitment)) + } +} From 3a38c2e65b7be456926de5aae88c8de915c5551d Mon Sep 17 00:00:00 2001 From: Randy Grok Date: Tue, 25 Nov 2025 18:11:45 +0100 Subject: [PATCH 12/35] add da folder into image --- apps/evm/single/Dockerfile | 1 + 1 file changed, 1 insertion(+) diff --git a/apps/evm/single/Dockerfile b/apps/evm/single/Dockerfile index d3e1fa797c..ac214e7546 100644 --- a/apps/evm/single/Dockerfile +++ b/apps/evm/single/Dockerfile @@ -3,6 +3,7 @@ FROM golang:1.24-alpine AS build-env WORKDIR /src COPY core core +COPY da da COPY go.mod go.sum ./ RUN go mod download From fcfde11dfeabd967d6b6b0d3d5f5cc4913fc5d3c Mon Sep 17 00:00:00 2001 From: Randy Grok Date: Wed, 26 Nov 2025 10:48:44 +0100 Subject: [PATCH 13/35] make tidy all --- apps/evm/go.mod | 2 +- apps/grpc/go.mod | 4 ++-- apps/testapp/go.mod | 2 +- da/go.mod | 1 - da/go.sum | 2 -- test/e2e/go.mod | 1 + 6 files changed, 5 insertions(+), 7 deletions(-) diff --git a/apps/evm/go.mod b/apps/evm/go.mod index 126891fedc..8467b573e6 100644 --- a/apps/evm/go.mod +++ b/apps/evm/go.mod @@ -18,7 +18,6 @@ require ( github.com/evstack/ev-node/da v1.0.0-beta.6 github.com/evstack/ev-node/execution/evm v1.0.0-beta.3 github.com/ipfs/go-datastore v0.9.0 - github.com/rs/zerolog v1.34.0 github.com/spf13/cobra v1.10.1 ) @@ -150,6 +149,7 @@ require ( github.com/quic-go/quic-go v0.54.1 // indirect github.com/quic-go/webtransport-go v0.9.0 // indirect github.com/rivo/uniseg v0.2.0 // indirect + github.com/rs/zerolog v1.34.0 // indirect github.com/sagikazarmark/locafero v0.11.0 // indirect github.com/shirou/gopsutil v3.21.4-0.20210419000835-c7a38de76ee5+incompatible // indirect github.com/sourcegraph/conc v0.3.1-0.20240121214520-5f936abd7ae8 // indirect diff --git a/apps/grpc/go.mod b/apps/grpc/go.mod index fdc2a9c21e..896ecd1429 100644 --- a/apps/grpc/go.mod +++ b/apps/grpc/go.mod @@ -16,8 +16,6 @@ require ( github.com/evstack/ev-node/core v1.0.0-beta.5 github.com/evstack/ev-node/da v1.0.0-beta.6 github.com/evstack/ev-node/execution/grpc v0.0.0 - github.com/ipfs/go-datastore v0.9.0 - github.com/rs/zerolog v1.34.0 github.com/spf13/cobra v1.10.1 ) @@ -58,6 +56,7 @@ require ( github.com/inconshreveable/mousetrap v1.1.0 // indirect github.com/ipfs/boxo v0.35.0 // indirect github.com/ipfs/go-cid v0.5.0 // indirect + github.com/ipfs/go-datastore v0.9.0 // indirect github.com/ipfs/go-ds-badger4 v0.1.8 // indirect github.com/ipfs/go-log/v2 v2.8.1 // indirect github.com/ipld/go-ipld-prime v0.21.0 // indirect @@ -131,6 +130,7 @@ require ( github.com/quic-go/qpack v0.5.1 // indirect github.com/quic-go/quic-go v0.54.1 // indirect github.com/quic-go/webtransport-go v0.9.0 // indirect + github.com/rs/zerolog v1.34.0 // indirect github.com/sagikazarmark/locafero v0.11.0 // indirect github.com/sourcegraph/conc v0.3.1-0.20240121214520-5f936abd7ae8 // indirect github.com/spaolacci/murmur3 v1.1.0 // indirect diff --git a/apps/testapp/go.mod b/apps/testapp/go.mod index 0e6ed2f892..981aac8b45 100644 --- a/apps/testapp/go.mod +++ b/apps/testapp/go.mod @@ -13,7 +13,6 @@ replace ( require ( github.com/celestiaorg/go-header v0.7.3 github.com/evstack/ev-node v1.0.0-beta.9 - github.com/evstack/ev-node/core v1.0.0-beta.5 github.com/evstack/ev-node/da v0.0.0-00010101000000-000000000000 github.com/ipfs/go-datastore v0.9.0 github.com/spf13/cobra v1.10.1 @@ -34,6 +33,7 @@ require ( github.com/dgraph-io/badger/v4 v4.5.1 // indirect github.com/dgraph-io/ristretto/v2 v2.1.0 // indirect github.com/dustin/go-humanize v1.0.1 // indirect + github.com/evstack/ev-node/core v1.0.0-beta.5 // indirect github.com/filecoin-project/go-clock v0.1.0 // indirect github.com/filecoin-project/go-jsonrpc v0.9.0 // indirect github.com/flynn/noise v1.1.0 // indirect diff --git a/da/go.mod b/da/go.mod index 478488dfbc..64da0b0f3f 100644 --- a/da/go.mod +++ b/da/go.mod @@ -3,7 +3,6 @@ module github.com/evstack/ev-node/da go 1.24.1 require ( - github.com/evstack/ev-node/core v1.0.0-beta.5 github.com/filecoin-project/go-jsonrpc v0.9.0 github.com/rs/zerolog v1.34.0 github.com/stretchr/testify v1.11.1 diff --git a/da/go.sum b/da/go.sum index cfa1dc32e2..2194a6b7c3 100644 --- a/da/go.sum +++ b/da/go.sum @@ -13,8 +13,6 @@ github.com/envoyproxy/go-control-plane v0.9.0/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymF github.com/envoyproxy/go-control-plane v0.9.1-0.20191026205805-5f8ba28d4473/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= github.com/envoyproxy/go-control-plane v0.9.4/go.mod h1:6rpuAdCZL397s3pYoYcLgu1mIlRU8Am5FuJP05cCM98= github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c= -github.com/evstack/ev-node/core v1.0.0-beta.5 h1:lgxE8XiF3U9pcFgh7xuKMgsOGvLBGRyd9kc9MR4WL0o= -github.com/evstack/ev-node/core v1.0.0-beta.5/go.mod h1:n2w/LhYQTPsi48m6lMj16YiIqsaQw6gxwjyJvR+B3sY= github.com/filecoin-project/go-jsonrpc v0.9.0 h1:G47qEF52w7GholpI21vPSTVBFvsrip6geIoqNiqyZtQ= github.com/filecoin-project/go-jsonrpc v0.9.0/go.mod h1:OG7kVBVh/AbDFHIwx7Kw0l9ARmKOS6gGOr0LbdBpbLc= github.com/godbus/dbus/v5 v5.0.4/go.mod h1:xhWf0FNVPg57R7Z0UbKHbJfkEywrmjJnf7w5xrFpKfA= diff --git a/test/e2e/go.mod b/test/e2e/go.mod index eda8633889..a1bc7b4c11 100644 --- a/test/e2e/go.mod +++ b/test/e2e/go.mod @@ -16,6 +16,7 @@ require ( replace ( github.com/evstack/ev-node => ../../ + github.com/evstack/ev-node/da => ../../da github.com/evstack/ev-node/execution/evm => ../../execution/evm github.com/evstack/ev-node/execution/evm/test => ../../execution/evm/test ) From 912e8cb0f1558a5195fc65ca3d41752e13bb881e Mon Sep 17 00:00:00 2001 From: Randy Grok Date: Wed, 26 Nov 2025 13:04:59 +0100 Subject: [PATCH 14/35] fix lint --- block/components.go | 2 +- block/components_test.go | 2 +- block/internal/syncing/syncer.go | 11 +---------- block/internal/syncing/syncer_backoff_test.go | 2 +- block/internal/syncing/syncer_test.go | 3 +-- da/testing.go | 3 +-- node/full.go | 2 +- node/helpers_test.go | 2 +- node/node.go | 2 +- pkg/cmd/run_node.go | 2 +- pkg/cmd/run_node_test.go | 2 +- pkg/rpc/server/server.go | 2 +- sequencers/single/sequencer.go | 2 +- sequencers/single/sequencer_test.go | 2 +- 14 files changed, 14 insertions(+), 25 deletions(-) diff --git a/block/components.go b/block/components.go index 4b2543ca04..6c78b46ad1 100644 --- a/block/components.go +++ b/block/components.go @@ -13,9 +13,9 @@ import ( "github.com/evstack/ev-node/block/internal/reaping" "github.com/evstack/ev-node/block/internal/submitting" "github.com/evstack/ev-node/block/internal/syncing" - da "github.com/evstack/ev-node/da" coreexecutor "github.com/evstack/ev-node/core/execution" coresequencer "github.com/evstack/ev-node/core/sequencer" + da "github.com/evstack/ev-node/da" "github.com/evstack/ev-node/pkg/config" "github.com/evstack/ev-node/pkg/genesis" "github.com/evstack/ev-node/pkg/signer" diff --git a/block/components_test.go b/block/components_test.go index 3fa9cae8f5..9e6991605d 100644 --- a/block/components_test.go +++ b/block/components_test.go @@ -15,8 +15,8 @@ import ( "github.com/stretchr/testify/mock" "github.com/stretchr/testify/require" - da "github.com/evstack/ev-node/da" coresequencer "github.com/evstack/ev-node/core/sequencer" + da "github.com/evstack/ev-node/da" "github.com/evstack/ev-node/pkg/config" "github.com/evstack/ev-node/pkg/genesis" "github.com/evstack/ev-node/pkg/signer/noop" diff --git a/block/internal/syncing/syncer.go b/block/internal/syncing/syncer.go index 01900c49a6..953a41ea15 100644 --- a/block/internal/syncing/syncer.go +++ b/block/internal/syncing/syncer.go @@ -13,8 +13,8 @@ import ( "github.com/rs/zerolog" "golang.org/x/sync/errgroup" - da "github.com/evstack/ev-node/da" coreexecutor "github.com/evstack/ev-node/core/execution" + da "github.com/evstack/ev-node/da" "github.com/evstack/ev-node/block/internal/cache" "github.com/evstack/ev-node/block/internal/common" @@ -679,15 +679,6 @@ func (s *Syncer) sendCriticalError(err error) { } } -// sendNonBlockingSignal sends a signal without blocking -func (s *Syncer) sendNonBlockingSignal(ch chan struct{}, name string) { - select { - case ch <- struct{}{}: - default: - s.logger.Debug().Str("channel", name).Msg("channel full, signal dropped") - } -} - // processPendingEvents fetches and processes pending events from cache // optimistically fetches the next events from cache until no matching heights are found func (s *Syncer) processPendingEvents() { diff --git a/block/internal/syncing/syncer_backoff_test.go b/block/internal/syncing/syncer_backoff_test.go index d4ca3717d5..d302a857a0 100644 --- a/block/internal/syncing/syncer_backoff_test.go +++ b/block/internal/syncing/syncer_backoff_test.go @@ -15,8 +15,8 @@ import ( "github.com/evstack/ev-node/block/internal/cache" "github.com/evstack/ev-node/block/internal/common" - da "github.com/evstack/ev-node/da" "github.com/evstack/ev-node/core/execution" + da "github.com/evstack/ev-node/da" "github.com/evstack/ev-node/pkg/config" "github.com/evstack/ev-node/pkg/genesis" "github.com/evstack/ev-node/pkg/store" diff --git a/block/internal/syncing/syncer_test.go b/block/internal/syncing/syncer_test.go index ea2d2c5b8d..4bc73f6bba 100644 --- a/block/internal/syncing/syncer_test.go +++ b/block/internal/syncing/syncer_test.go @@ -9,8 +9,8 @@ import ( "testing" "time" - da "github.com/evstack/ev-node/da" "github.com/evstack/ev-node/core/execution" + da "github.com/evstack/ev-node/da" "github.com/evstack/ev-node/pkg/genesis" signerpkg "github.com/evstack/ev-node/pkg/signer" "github.com/evstack/ev-node/pkg/signer/noop" @@ -644,4 +644,3 @@ func requireEmptyChan(t *testing.T, errorCh chan error) { default: } } - diff --git a/da/testing.go b/da/testing.go index a66622bd77..b730f60b7e 100644 --- a/da/testing.go +++ b/da/testing.go @@ -109,8 +109,7 @@ func (d *DummyDA) GetIDs(ctx context.Context, height uint64, namespace []byte) ( // Filter IDs by namespace filteredIDs := make([]ID, 0) for _, id := range ids { - idStr := string(id) - if ns, exists := d.namespaceByID[idStr]; exists && bytes.Equal(ns, namespace) { + if ns, exists := d.namespaceByID[string(id)]; exists && bytes.Equal(ns, namespace) { filteredIDs = append(filteredIDs, id) } } diff --git a/node/full.go b/node/full.go index f6c7e73d28..c41e7099f8 100644 --- a/node/full.go +++ b/node/full.go @@ -18,9 +18,9 @@ import ( "github.com/evstack/ev-node/block" - da "github.com/evstack/ev-node/da" coreexecutor "github.com/evstack/ev-node/core/execution" coresequencer "github.com/evstack/ev-node/core/sequencer" + da "github.com/evstack/ev-node/da" "github.com/evstack/ev-node/pkg/config" genesispkg "github.com/evstack/ev-node/pkg/genesis" "github.com/evstack/ev-node/pkg/p2p" diff --git a/node/helpers_test.go b/node/helpers_test.go index e9f20ff3c9..da7f43dbdb 100644 --- a/node/helpers_test.go +++ b/node/helpers_test.go @@ -17,9 +17,9 @@ import ( "github.com/rs/zerolog" "github.com/stretchr/testify/require" - da "github.com/evstack/ev-node/da" coreexecutor "github.com/evstack/ev-node/core/execution" coresequencer "github.com/evstack/ev-node/core/sequencer" + da "github.com/evstack/ev-node/da" evconfig "github.com/evstack/ev-node/pkg/config" "github.com/evstack/ev-node/pkg/p2p" diff --git a/node/node.go b/node/node.go index 67111d9b51..218e873275 100644 --- a/node/node.go +++ b/node/node.go @@ -5,9 +5,9 @@ import ( "github.com/rs/zerolog" "github.com/evstack/ev-node/block" - da "github.com/evstack/ev-node/da" coreexecutor "github.com/evstack/ev-node/core/execution" coresequencer "github.com/evstack/ev-node/core/sequencer" + da "github.com/evstack/ev-node/da" "github.com/evstack/ev-node/pkg/config" "github.com/evstack/ev-node/pkg/genesis" "github.com/evstack/ev-node/pkg/p2p" diff --git a/pkg/cmd/run_node.go b/pkg/cmd/run_node.go index 102273b064..f3544a3bbb 100644 --- a/pkg/cmd/run_node.go +++ b/pkg/cmd/run_node.go @@ -16,9 +16,9 @@ import ( "github.com/rs/zerolog" "github.com/spf13/cobra" - da "github.com/evstack/ev-node/da" coreexecutor "github.com/evstack/ev-node/core/execution" coresequencer "github.com/evstack/ev-node/core/sequencer" + da "github.com/evstack/ev-node/da" "github.com/evstack/ev-node/node" rollconf "github.com/evstack/ev-node/pkg/config" genesispkg "github.com/evstack/ev-node/pkg/genesis" diff --git a/pkg/cmd/run_node_test.go b/pkg/cmd/run_node_test.go index 3db53492b4..c4058e0801 100644 --- a/pkg/cmd/run_node_test.go +++ b/pkg/cmd/run_node_test.go @@ -13,9 +13,9 @@ import ( "github.com/spf13/cobra" "github.com/stretchr/testify/assert" - da "github.com/evstack/ev-node/da" coreexecutor "github.com/evstack/ev-node/core/execution" coresequencer "github.com/evstack/ev-node/core/sequencer" + da "github.com/evstack/ev-node/da" "github.com/evstack/ev-node/node" rollconf "github.com/evstack/ev-node/pkg/config" genesis "github.com/evstack/ev-node/pkg/genesis" diff --git a/pkg/rpc/server/server.go b/pkg/rpc/server/server.go index 7e97db3273..cb4a22b295 100644 --- a/pkg/rpc/server/server.go +++ b/pkg/rpc/server/server.go @@ -10,9 +10,9 @@ import ( "encoding/binary" "errors" - goheader "github.com/celestiaorg/go-header" "connectrpc.com/connect" "connectrpc.com/grpcreflect" + goheader "github.com/celestiaorg/go-header" da "github.com/evstack/ev-node/da" ds "github.com/ipfs/go-datastore" "github.com/rs/zerolog" diff --git a/sequencers/single/sequencer.go b/sequencers/single/sequencer.go index c8db9708c1..c3b3c14200 100644 --- a/sequencers/single/sequencer.go +++ b/sequencers/single/sequencer.go @@ -10,8 +10,8 @@ import ( ds "github.com/ipfs/go-datastore" "github.com/rs/zerolog" - da "github.com/evstack/ev-node/da" coresequencer "github.com/evstack/ev-node/core/sequencer" + da "github.com/evstack/ev-node/da" ) // ErrInvalidId is returned when the chain id is invalid diff --git a/sequencers/single/sequencer_test.go b/sequencers/single/sequencer_test.go index 609a442a2c..c9c924e147 100644 --- a/sequencers/single/sequencer_test.go +++ b/sequencers/single/sequencer_test.go @@ -13,8 +13,8 @@ import ( "github.com/stretchr/testify/mock" "github.com/stretchr/testify/require" - da "github.com/evstack/ev-node/da" coresequencer "github.com/evstack/ev-node/core/sequencer" + da "github.com/evstack/ev-node/da" damocks "github.com/evstack/ev-node/test/mocks" ) From 701023ff30ab872509b00838ef8071228eab0f1f Mon Sep 17 00:00:00 2001 From: Randy Grok Date: Wed, 26 Nov 2025 13:17:04 +0100 Subject: [PATCH 15/35] fix linter problems --- core/sequencer/sequencing_test.go | 4 ++-- sequencers/single/sequencer_test.go | 13 +++---------- 2 files changed, 5 insertions(+), 12 deletions(-) diff --git a/core/sequencer/sequencing_test.go b/core/sequencer/sequencing_test.go index 53d51d6e21..dd1ac4c389 100644 --- a/core/sequencer/sequencing_test.go +++ b/core/sequencer/sequencing_test.go @@ -54,9 +54,9 @@ func TestBatchHash(t *testing.T) { name: "transactions with empty data", batch: &Batch{ Transactions: [][]byte{ - []byte{}, + {}, []byte("normal transaction"), - []byte{}, + {}, }, }, wantErr: false, diff --git a/sequencers/single/sequencer_test.go b/sequencers/single/sequencer_test.go index c9c924e147..e82e17cf0e 100644 --- a/sequencers/single/sequencer_test.go +++ b/sequencers/single/sequencer_test.go @@ -38,16 +38,9 @@ func TestNewSequencer(t *testing.T) { }() // Check if the sequencer was created with the correct values - if seq == nil { - t.Fatal("Expected sequencer to not be nil") - } - - if seq.queue == nil { - t.Fatal("Expected batch queue to not be nil") - } - if seq.da == nil { - t.Fatal("Expected DA client to not be nil") - } + require.NotNil(t, seq, "Expected sequencer to not be nil") + require.NotNil(t, seq.queue, "Expected batch queue to not be nil") + require.NotNil(t, seq.da, "Expected DA client to not be nil") } func TestSequencer_SubmitBatchTxs(t *testing.T) { From 6e775ff7d5debc6ee843514a98055e952158e68a Mon Sep 17 00:00:00 2001 From: Randy Grok Date: Wed, 26 Nov 2025 17:52:05 +0100 Subject: [PATCH 16/35] fix e2e tests --- da/cmd/local-da/server.go | 227 ++++++++++++++++++++++++++++++++++-- test/e2e/evm_test_common.go | 41 +++++++ 2 files changed, 261 insertions(+), 7 deletions(-) diff --git a/da/cmd/local-da/server.go b/da/cmd/local-da/server.go index f4e7b9d338..b45e4ad47c 100644 --- a/da/cmd/local-da/server.go +++ b/da/cmd/local-da/server.go @@ -2,6 +2,9 @@ package main import ( "context" + "crypto/sha256" + "encoding/binary" + "fmt" "net" "net/http" "sync/atomic" @@ -13,6 +16,27 @@ import ( "github.com/evstack/ev-node/da" ) +// Blob represents a Celestia-compatible blob for the blob API +type Blob struct { + Namespace []byte `json:"namespace"` + Data []byte `json:"data"` + ShareVer uint32 `json:"share_version"` + Commitment []byte `json:"commitment"` + Index int `json:"index"` +} + +// Proof represents a Celestia-compatible inclusion proof +type Proof struct { + Data []byte `json:"data"` +} + +// SubmitOptions contains options for blob submission +type SubmitOptions struct { + Fee float64 `json:"fee,omitempty"` + GasLimit uint64 `json:"gas_limit,omitempty"` + SignerAddress string `json:"signer_address,omitempty"` +} + // Server is a jsonrpc service that serves the LocalDA implementation type Server struct { logger zerolog.Logger @@ -20,6 +44,7 @@ type Server struct { rpc *jsonrpc.RPCServer listener net.Listener daImpl da.DA + localDA *LocalDA // For blob API access to internal data started atomic.Bool } @@ -72,6 +97,184 @@ func (s *serverInternalAPI) SubmitWithOptions(ctx context.Context, blobs []da.Bl return s.daImpl.SubmitWithOptions(ctx, blobs, gasPrice, ns, options) } +// blobAPI provides Celestia-compatible Blob API methods +type blobAPI struct { + logger zerolog.Logger + localDA *LocalDA +} + +// Submit submits blobs and returns the DA height (Celestia blob API compatible) +func (b *blobAPI) Submit(ctx context.Context, blobs []*Blob, opts *SubmitOptions) (uint64, error) { + b.logger.Debug().Int("num_blobs", len(blobs)).Msg("blob.Submit called") + + if len(blobs) == 0 { + return 0, nil + } + + ns := blobs[0].Namespace + + rawBlobs := make([][]byte, len(blobs)) + for i, blob := range blobs { + rawBlobs[i] = blob.Data + } + + var gasPrice float64 + if opts != nil { + gasPrice = opts.Fee + } + + _, err := b.localDA.Submit(ctx, rawBlobs, gasPrice, ns) + if err != nil { + return 0, err + } + + b.localDA.mu.Lock() + height := b.localDA.height + b.localDA.mu.Unlock() + + b.logger.Info().Uint64("height", height).Int("num_blobs", len(blobs)).Msg("blob.Submit successful") + return height, nil +} + +// Get retrieves a single blob by commitment at a given height (Celestia blob API compatible) +func (b *blobAPI) Get(ctx context.Context, height uint64, ns []byte, commitment []byte) (*Blob, error) { + b.logger.Debug().Uint64("height", height).Msg("blob.Get called") + + blobs, err := b.GetAll(ctx, height, [][]byte{ns}) + if err != nil { + return nil, err + } + + for _, blob := range blobs { + if len(commitment) == 0 || bytesEqual(blob.Commitment, commitment) { + return blob, nil + } + } + + return nil, nil +} + +// GetAll retrieves all blobs at a given height for the specified namespaces (Celestia blob API compatible) +func (b *blobAPI) GetAll(ctx context.Context, height uint64, namespaces [][]byte) ([]*Blob, error) { + b.logger.Debug().Uint64("height", height).Int("num_namespaces", len(namespaces)).Msg("blob.GetAll called") + + if len(namespaces) == 0 { + return []*Blob{}, nil + } + + ns := namespaces[0] + + b.localDA.mu.Lock() + defer b.localDA.mu.Unlock() + + if height > b.localDA.height { + b.logger.Debug().Uint64("requested", height).Uint64("current", b.localDA.height).Msg("blob.GetAll: height in future") + return nil, fmt.Errorf("height %d from future, current height is %d", height, b.localDA.height) + } + + kvps, ok := b.localDA.data[height] + if !ok { + b.logger.Debug().Uint64("height", height).Msg("blob.GetAll: no data for height") + return []*Blob{}, nil + } + + blobs := make([]*Blob, 0, len(kvps)) + for i, kv := range kvps { + var commitment []byte + if len(kv.key) > 8 { + commitment = kv.key[8:] + } else { + hash := sha256.Sum256(kv.value) + commitment = hash[:] + } + + blobs = append(blobs, &Blob{ + Namespace: ns, + Data: kv.value, + ShareVer: 0, + Commitment: commitment, + Index: i, + }) + } + + b.logger.Debug().Uint64("height", height).Int("num_blobs", len(blobs)).Msg("blob.GetAll successful") + return blobs, nil +} + +// GetProof retrieves the inclusion proof for a blob (Celestia blob API compatible) +func (b *blobAPI) GetProof(ctx context.Context, height uint64, ns []byte, commitment []byte) (*Proof, error) { + b.logger.Debug().Uint64("height", height).Msg("blob.GetProof called") + + b.localDA.mu.Lock() + defer b.localDA.mu.Unlock() + + kvps, ok := b.localDA.data[height] + if !ok { + return nil, nil + } + + for _, kv := range kvps { + var blobCommitment []byte + if len(kv.key) > 8 { + blobCommitment = kv.key[8:] + } + + if len(commitment) == 0 || bytesEqual(blobCommitment, commitment) { + proof := b.localDA.getProof(kv.key, kv.value) + return &Proof{Data: proof}, nil + } + } + + return nil, nil +} + +// Included checks whether a blob is included in the DA layer (Celestia blob API compatible) +func (b *blobAPI) Included(ctx context.Context, height uint64, ns []byte, proof *Proof, commitment []byte) (bool, error) { + b.logger.Debug().Uint64("height", height).Msg("blob.Included called") + + b.localDA.mu.Lock() + defer b.localDA.mu.Unlock() + + kvps, ok := b.localDA.data[height] + if !ok { + return false, nil + } + + for _, kv := range kvps { + var blobCommitment []byte + if len(kv.key) > 8 { + blobCommitment = kv.key[8:] + } + + if bytesEqual(blobCommitment, commitment) { + return true, nil + } + } + + return false, nil +} + +// bytesEqual compares two byte slices +func bytesEqual(a, b []byte) bool { + if len(a) != len(b) { + return false + } + for i := range a { + if a[i] != b[i] { + return false + } + } + return true +} + +// makeID creates an ID from height and commitment +func makeID(height uint64, commitment []byte) []byte { + id := make([]byte, 8+len(commitment)) + binary.LittleEndian.PutUint64(id, height) + copy(id[8:], commitment) + return id +} + func getKnownErrorsMapping() jsonrpc.Errors { errs := jsonrpc.NewErrors() errs.Register(jsonrpc.ErrorCode(da.StatusNotFound), &da.ErrBlobNotFound) @@ -86,12 +289,14 @@ func getKnownErrorsMapping() jsonrpc.Errors { } // NewServer creates a new JSON-RPC server for the LocalDA implementation -func NewServer(logger zerolog.Logger, address, port string, daImplementation da.DA) *Server { +// It registers both the legacy "da" namespace and the Celestia-compatible "blob" namespace +func NewServer(logger zerolog.Logger, address, port string, localDA *LocalDA) *Server { rpc := jsonrpc.NewServer(jsonrpc.WithServerErrors(getKnownErrorsMapping())) srv := &Server{ - rpc: rpc, - logger: logger, - daImpl: daImplementation, + rpc: rpc, + logger: logger, + daImpl: localDA, + localDA: localDA, srv: &http.Server{ Addr: address + ":" + port, ReadHeaderTimeout: 2 * time.Second, @@ -99,12 +304,20 @@ func NewServer(logger zerolog.Logger, address, port string, daImplementation da. } srv.srv.Handler = http.HandlerFunc(rpc.ServeHTTP) - apiHandler := &serverInternalAPI{ + // Register legacy "da" namespace API + daAPIHandler := &serverInternalAPI{ logger: logger, - daImpl: daImplementation, + daImpl: localDA, + } + srv.rpc.Register("da", daAPIHandler) + + // Register Celestia-compatible "blob" namespace API + blobAPIHandler := &blobAPI{ + logger: logger, + localDA: localDA, } + srv.rpc.Register("blob", blobAPIHandler) - srv.rpc.Register("da", apiHandler) return srv } diff --git a/test/e2e/evm_test_common.go b/test/e2e/evm_test_common.go index 9d502cc218..535e25e752 100644 --- a/test/e2e/evm_test_common.go +++ b/test/e2e/evm_test_common.go @@ -404,6 +404,10 @@ func setupFullNode(t *testing.T, sut *SystemUnderTest, fullNodeHome, sequencerHo err = os.WriteFile(fullNodeGenesis, genesisData, 0644) require.NoError(t, err, "failed to write full node genesis file") + // Read namespace from sequencer config to pass to full node + sequencerConfigPath := filepath.Join(sequencerHome, "config", "evnode.yaml") + namespace := extractNamespaceFromConfig(t, sequencerConfigPath) + // Create JWT secret file for full node fullNodeJwtSecretFile := createJWTSecretFile(t, fullNodeHome, fullNodeJwtSecret) @@ -418,6 +422,7 @@ func setupFullNode(t *testing.T, sut *SystemUnderTest, fullNodeHome, sequencerHo "--evm.eth-url", endpoints.GetFullNodeEthURL(), "--rollkit.da.block_time", DefaultDABlockTime, "--rollkit.da.address", endpoints.GetDAAddress(), + "--rollkit.da.namespace", namespace, // Use same namespace as sequencer "--rollkit.rpc.address", endpoints.GetFullNodeRPCListen(), "--rollkit.p2p.listen_address", endpoints.GetFullNodeP2PAddress(), } @@ -427,6 +432,42 @@ func setupFullNode(t *testing.T, sut *SystemUnderTest, fullNodeHome, sequencerHo sut.AwaitNodeLive(t, endpoints.GetFullNodeRPCAddress(), NodeStartupTimeout) } +// extractNamespaceFromConfig reads the namespace from a config file +func extractNamespaceFromConfig(t *testing.T, configPath string) string { + t.Helper() + + configData, err := os.ReadFile(configPath) + require.NoError(t, err, "failed to read config file") + + // Parse YAML - look for "namespace:" under "da:" section + lines := strings.Split(string(configData), "\n") + inDASection := false + for _, line := range lines { + // Check if we're entering the da: section + if strings.TrimSpace(line) == "da:" { + inDASection = true + continue + } + // Check if we're leaving the da: section (new top-level key) + if inDASection && len(line) > 0 && line[0] != ' ' && line[0] != '\t' { + inDASection = false + } + // Look for namespace: inside the da: section + if inDASection { + trimmed := strings.TrimSpace(line) + if strings.HasPrefix(trimmed, "namespace:") { + parts := strings.SplitN(trimmed, ":", 2) + if len(parts) == 2 { + return strings.TrimSpace(parts[1]) + } + } + } + } + + t.Fatal("namespace not found in config file") + return "" +} + // Global nonce counter to ensure unique nonces across multiple transaction submissions var globalNonce uint64 = 0 From 8bd09fd37446ceb9674b3d65a6107f610362adcb Mon Sep 17 00:00:00 2001 From: Randy Grok Date: Wed, 26 Nov 2025 19:25:26 +0100 Subject: [PATCH 17/35] fix: correct broken documentation links - Fix relative paths in apps/grpc/README.md (../../../ -> ../../) - Add missing .md extension in docs/guides/full-node.md link - Wrap localhost URL in backticks in tools/da-debug/README.md --- apps/grpc/README.md | 4 ++-- docs/guides/full-node.md | 2 +- tools/da-debug/README.md | 2 +- 3 files changed, 4 insertions(+), 4 deletions(-) diff --git a/apps/grpc/README.md b/apps/grpc/README.md index dac9a847f2..51a49ebc09 100644 --- a/apps/grpc/README.md +++ b/apps/grpc/README.md @@ -150,5 +150,5 @@ If you have issues connecting to the DA layer: ## See Also - [Evolve Documentation](https://ev.xyz) -- [gRPC Execution Interface](../../../execution/grpc/README.md) -- [Single Sequencer Documentation](../../../sequencers/single/README.md) +- [gRPC Execution Interface](../../execution/grpc/README.md) +- [Single Sequencer Documentation](../../sequencers/single/README.md) diff --git a/docs/guides/full-node.md b/docs/guides/full-node.md index 0022f65082..d6d597a000 100644 --- a/docs/guides/full-node.md +++ b/docs/guides/full-node.md @@ -4,7 +4,7 @@ This guide covers how to set up a full node to run alongside a sequencer node in a Evolve-based blockchain network. A full node maintains a complete copy of the blockchain and helps validate transactions, improving the network's decentralization and security. -> **Note: The guide on how to run an evolve EVM full node can be found [in the evm section](./evm/single#setting-up-a-full-node).** +> **Note: The guide on how to run an evolve EVM full node can be found [in the evm section](./evm/single.md#setting-up-a-full-node).** ## Prerequisites diff --git a/tools/da-debug/README.md b/tools/da-debug/README.md index ef6eeae18c..7db8a17c14 100644 --- a/tools/da-debug/README.md +++ b/tools/da-debug/README.md @@ -75,7 +75,7 @@ da-debug search 100 "0x000000000000000000000000000000000000000000000000000000746 All commands support these global flags: -- `--da-url string`: Celestia node RPC URL (default: "http://localhost:26658") +- `--da-url string`: Celestia node RPC URL (default: `http://localhost:26658`) - `--auth-token string`: Authentication token for Celestia node - `--timeout duration`: Request timeout (default: 30s) - `--verbose`: Enable verbose logging From 91d015df6f03a6f92e311f845b08cf56788a5525 Mon Sep 17 00:00:00 2001 From: Randy Grok Date: Wed, 26 Nov 2025 19:29:58 +0100 Subject: [PATCH 18/35] ci: ignore medium.com links in markdown link checker Medium returns 403 for automated requests, causing false positives in the CI link checker. --- .mlc_config.json | 8 ++++++++ 1 file changed, 8 insertions(+) create mode 100644 .mlc_config.json diff --git a/.mlc_config.json b/.mlc_config.json new file mode 100644 index 0000000000..e39c2d2b7d --- /dev/null +++ b/.mlc_config.json @@ -0,0 +1,8 @@ +{ + "ignorePatterns": [ + { + "pattern": "^https://medium\\.com" + } + ], + "aliveStatusCodes": [200, 206, 403] +} From 12d4876cd6df10b8cc667e5aa8a22fd589e41143 Mon Sep 17 00:00:00 2001 From: Randy Grok Date: Wed, 26 Nov 2025 19:34:20 +0100 Subject: [PATCH 19/35] fix: rename mlc config file (remove leading dot) --- .mlc_config.json => mlc_config.json | 0 1 file changed, 0 insertions(+), 0 deletions(-) rename .mlc_config.json => mlc_config.json (100%) diff --git a/.mlc_config.json b/mlc_config.json similarity index 100% rename from .mlc_config.json rename to mlc_config.json From 97ee61d3264d2a660e082a23de7e891de3292128 Mon Sep 17 00:00:00 2001 From: Randy Grok Date: Wed, 26 Nov 2025 19:35:27 +0100 Subject: [PATCH 20/35] docs: remove dead Medium link from data availability page --- docs/learn/data-availability.md | 1 - 1 file changed, 1 deletion(-) diff --git a/docs/learn/data-availability.md b/docs/learn/data-availability.md index 0a744ef945..deb7633fa2 100644 --- a/docs/learn/data-availability.md +++ b/docs/learn/data-availability.md @@ -5,7 +5,6 @@ Data availability (DA) is a core of Evolve's. Evolve utilize's data availabilit Learn more about data availability: - [What is DA](https://celestia.org/what-is-da/) -- [The importance of DA for Rollups](https://medium.com/zeeve/exploring-data-availability-layer-and-its-importance-in-rollups-0a4fbf2e0ffc) ## How Evolve Handles Data Availability From 33d5a9bf8f6f4de372390490fc63f8abf02c53ce Mon Sep 17 00:00:00 2001 From: Randy Grok Date: Wed, 26 Nov 2025 19:35:53 +0100 Subject: [PATCH 21/35] Revert "docs: remove dead Medium link from data availability page" This reverts commit 97ee61d3264d2a660e082a23de7e891de3292128. --- docs/learn/data-availability.md | 1 + 1 file changed, 1 insertion(+) diff --git a/docs/learn/data-availability.md b/docs/learn/data-availability.md index deb7633fa2..0a744ef945 100644 --- a/docs/learn/data-availability.md +++ b/docs/learn/data-availability.md @@ -5,6 +5,7 @@ Data availability (DA) is a core of Evolve's. Evolve utilize's data availabilit Learn more about data availability: - [What is DA](https://celestia.org/what-is-da/) +- [The importance of DA for Rollups](https://medium.com/zeeve/exploring-data-availability-layer-and-its-importance-in-rollups-0a4fbf2e0ffc) ## How Evolve Handles Data Availability From f8f43e1eef072e4650827be99854a0d15ff4cd86 Mon Sep 17 00:00:00 2001 From: Randy Grok Date: Wed, 26 Nov 2025 19:36:36 +0100 Subject: [PATCH 22/35] ci: add mlc_config.json to docs folder for link checker --- docs/mlc_config.json | 8 ++++++++ 1 file changed, 8 insertions(+) create mode 100644 docs/mlc_config.json diff --git a/docs/mlc_config.json b/docs/mlc_config.json new file mode 100644 index 0000000000..e39c2d2b7d --- /dev/null +++ b/docs/mlc_config.json @@ -0,0 +1,8 @@ +{ + "ignorePatterns": [ + { + "pattern": "^https://medium\\.com" + } + ], + "aliveStatusCodes": [200, 206, 403] +} From 6c75e43e7640bcef9c7fb4ef20d31f154f7028f4 Mon Sep 17 00:00:00 2001 From: Randy Grok Date: Wed, 26 Nov 2025 19:37:36 +0100 Subject: [PATCH 23/35] revert: remove mlc_config.json from docs folder --- docs/mlc_config.json | 8 -------- 1 file changed, 8 deletions(-) delete mode 100644 docs/mlc_config.json diff --git a/docs/mlc_config.json b/docs/mlc_config.json deleted file mode 100644 index e39c2d2b7d..0000000000 --- a/docs/mlc_config.json +++ /dev/null @@ -1,8 +0,0 @@ -{ - "ignorePatterns": [ - { - "pattern": "^https://medium\\.com" - } - ], - "aliveStatusCodes": [200, 206, 403] -} From 24ce8ea5cada80535780226e0a671dba57b42873 Mon Sep 17 00:00:00 2001 From: Randy Grok Date: Wed, 26 Nov 2025 19:59:56 +0100 Subject: [PATCH 24/35] docs: add comment explaining Medium 403 issue in mlc_config.json --- mlc_config.json | 1 + 1 file changed, 1 insertion(+) diff --git a/mlc_config.json b/mlc_config.json index e39c2d2b7d..24b6f78160 100644 --- a/mlc_config.json +++ b/mlc_config.json @@ -1,4 +1,5 @@ { + "$comment": "Medium.com returns 403 for automated requests in CI, even though the links are valid", "ignorePatterns": [ { "pattern": "^https://medium\\.com" From f2fc08031f63b354d10b401184d189af44c67868 Mon Sep 17 00:00:00 2001 From: Randy Grok Date: Thu, 27 Nov 2025 12:59:42 +0100 Subject: [PATCH 25/35] create commitment on the client --- da/celestia/client.go | 30 +++--- da/celestia/commitment.go | 53 +++++++++++ da/celestia/commitment_test.go | 166 +++++++++++++++++++++++++++++++++ da/go.mod | 7 ++ da/go.sum | 37 ++++++++ 5 files changed, 282 insertions(+), 11 deletions(-) create mode 100644 da/celestia/commitment.go create mode 100644 da/celestia/commitment_test.go diff --git a/da/celestia/client.go b/da/celestia/client.go index 7328637ae8..cae660adf3 100644 --- a/da/celestia/client.go +++ b/da/celestia/client.go @@ -222,12 +222,18 @@ func (c *Client) SubmitWithOptions(ctx context.Context, blobs []da.Blob, gasPric return nil, fmt.Errorf("invalid namespace: %w", err) } - // Convert blobs to Celestia format + // Convert blobs to Celestia format and calculate commitments locally celestiaBlobs := make([]*Blob, len(blobs)) for i, blob := range blobs { + // Calculate commitment locally using the same algorithm as celestia-node + commitment, err := CreateCommitment(blob, namespace) + if err != nil { + return nil, fmt.Errorf("failed to create commitment for blob %d: %w", i, err) + } celestiaBlobs[i] = &Blob{ - Namespace: namespace, - Data: blob, + Namespace: namespace, + Data: blob, + Commitment: commitment, } } @@ -254,11 +260,10 @@ func (c *Client) SubmitWithOptions(ctx context.Context, blobs []da.Blob, gasPric return nil, err } - // Create IDs from height only (commitments not needed for Submit result) - // Commitments will be retrieved later via GetIDs when needed + // Create IDs from height and locally-computed commitments ids := make([]da.ID, len(celestiaBlobs)) - for i := range celestiaBlobs { - ids[i] = makeID(height, nil) + for i, blob := range celestiaBlobs { + ids[i] = makeID(height, blob.Commitment) } return ids, nil @@ -363,12 +368,15 @@ func (c *Client) GetProofs(ctx context.Context, ids []da.ID, namespace []byte) ( } // Commit creates commitments for the given blobs. -// Note: Celestia generates commitments automatically during submission, -// so this is a no-op that returns nil commitments. +// Commitments are computed locally using the same algorithm as celestia-node. func (c *Client) Commit(ctx context.Context, blobs []da.Blob, namespace []byte) ([]da.Commitment, error) { commitments := make([]da.Commitment, len(blobs)) - for i := range blobs { - commitments[i] = nil + for i, blob := range blobs { + commitment, err := CreateCommitment(blob, namespace) + if err != nil { + return nil, fmt.Errorf("failed to create commitment for blob %d: %w", i, err) + } + commitments[i] = commitment } return commitments, nil } diff --git a/da/celestia/commitment.go b/da/celestia/commitment.go new file mode 100644 index 0000000000..9963aa1c14 --- /dev/null +++ b/da/celestia/commitment.go @@ -0,0 +1,53 @@ +package celestia + +import ( + "fmt" + + "github.com/celestiaorg/go-square/merkle" + "github.com/celestiaorg/go-square/v3/inclusion" + libshare "github.com/celestiaorg/go-square/v3/share" +) + +// subtreeRootThreshold matches the value used by celestia-app. +// This determines the size of subtrees when computing blob commitments. +const subtreeRootThreshold = 64 + +// CreateCommitment computes the commitment for a blob. +// The commitment is computed using the same algorithm as celestia-node: +// 1. Split the blob data into shares +// 2. Build a Merkle tree over the shares +// 3. Return the Merkle root +func CreateCommitment(data []byte, namespace []byte) (Commitment, error) { + // Create namespace from bytes + ns, err := libshare.NewNamespaceFromBytes(namespace) + if err != nil { + return nil, fmt.Errorf("failed to create namespace: %w", err) + } + + // Create a blob with share version 0 (default) + blob, err := libshare.NewBlob(ns, data, libshare.ShareVersionZero, nil) + if err != nil { + return nil, fmt.Errorf("failed to create blob: %w", err) + } + + // Compute commitment using the same function as celestia-node + commitment, err := inclusion.CreateCommitment(blob, merkle.HashFromByteSlices, subtreeRootThreshold) + if err != nil { + return nil, fmt.Errorf("failed to create commitment: %w", err) + } + + return commitment, nil +} + +// CreateCommitments computes commitments for multiple blobs. +func CreateCommitments(data [][]byte, namespace []byte) ([]Commitment, error) { + commitments := make([]Commitment, len(data)) + for i, d := range data { + commitment, err := CreateCommitment(d, namespace) + if err != nil { + return nil, fmt.Errorf("failed to create commitment for blob %d: %w", i, err) + } + commitments[i] = commitment + } + return commitments, nil +} diff --git a/da/celestia/commitment_test.go b/da/celestia/commitment_test.go new file mode 100644 index 0000000000..2299096890 --- /dev/null +++ b/da/celestia/commitment_test.go @@ -0,0 +1,166 @@ +package celestia + +import ( + "testing" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" +) + +func TestCreateCommitment(t *testing.T) { + // Create a valid 29-byte namespace (version 0 + 28 bytes ID) + namespace := make([]byte, 29) + namespace[0] = 0 // version 0 + + tests := []struct { + name string + data []byte + namespace []byte + wantErr bool + errContains string + }{ + { + name: "valid small blob", + data: []byte("hello world"), + namespace: namespace, + wantErr: false, + }, + { + name: "valid larger blob", + data: make([]byte, 1024), + namespace: namespace, + wantErr: false, + }, + { + name: "empty blob not allowed", + data: []byte{}, + namespace: namespace, + wantErr: true, + errContains: "empty", + }, + { + name: "invalid namespace too short", + data: []byte("test"), + namespace: make([]byte, 10), + wantErr: true, + errContains: "namespace", + }, + { + name: "invalid namespace too long", + data: []byte("test"), + namespace: make([]byte, 30), + wantErr: true, + errContains: "namespace", + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + commitment, err := CreateCommitment(tt.data, tt.namespace) + + if tt.wantErr { + require.Error(t, err) + if tt.errContains != "" { + assert.Contains(t, err.Error(), tt.errContains) + } + assert.Nil(t, commitment) + } else { + require.NoError(t, err) + require.NotNil(t, commitment) + // Commitment should be non-empty + assert.Greater(t, len(commitment), 0) + } + }) + } +} + +func TestCreateCommitment_Deterministic(t *testing.T) { + // Create a valid namespace + namespace := make([]byte, 29) + namespace[0] = 0 + + data := []byte("test data for deterministic commitment") + + // Create commitment twice + commitment1, err := CreateCommitment(data, namespace) + require.NoError(t, err) + + commitment2, err := CreateCommitment(data, namespace) + require.NoError(t, err) + + // Should be identical + assert.Equal(t, commitment1, commitment2) +} + +func TestCreateCommitment_DifferentData(t *testing.T) { + // Create a valid namespace + namespace := make([]byte, 29) + namespace[0] = 0 + + data1 := []byte("data one") + data2 := []byte("data two") + + commitment1, err := CreateCommitment(data1, namespace) + require.NoError(t, err) + + commitment2, err := CreateCommitment(data2, namespace) + require.NoError(t, err) + + // Should be different + assert.NotEqual(t, commitment1, commitment2) +} + +func TestCreateCommitment_DifferentNamespace(t *testing.T) { + // Create two different valid namespaces + namespace1 := make([]byte, 29) + namespace1[0] = 0 + namespace1[28] = 1 + + namespace2 := make([]byte, 29) + namespace2[0] = 0 + namespace2[28] = 2 + + data := []byte("same data") + + commitment1, err := CreateCommitment(data, namespace1) + require.NoError(t, err) + + commitment2, err := CreateCommitment(data, namespace2) + require.NoError(t, err) + + // Should be different due to different namespaces + assert.NotEqual(t, commitment1, commitment2) +} + +func TestCreateCommitments(t *testing.T) { + // Create a valid namespace + namespace := make([]byte, 29) + namespace[0] = 0 + + blobs := [][]byte{ + []byte("blob one"), + []byte("blob two"), + []byte("blob three"), + } + + commitments, err := CreateCommitments(blobs, namespace) + require.NoError(t, err) + require.Len(t, commitments, 3) + + // All commitments should be non-empty and different + for i, c := range commitments { + assert.Greater(t, len(c), 0, "commitment %d should not be empty", i) + } + + assert.NotEqual(t, commitments[0], commitments[1]) + assert.NotEqual(t, commitments[1], commitments[2]) + assert.NotEqual(t, commitments[0], commitments[2]) +} + +func TestCreateCommitments_Empty(t *testing.T) { + namespace := make([]byte, 29) + + commitments, err := CreateCommitments([][]byte{}, namespace) + require.NoError(t, err) + assert.Len(t, commitments, 0) +} diff --git a/da/go.mod b/da/go.mod index 64da0b0f3f..cf53b0b2fe 100644 --- a/da/go.mod +++ b/da/go.mod @@ -3,13 +3,17 @@ module github.com/evstack/ev-node/da go 1.24.1 require ( + github.com/celestiaorg/go-square/merkle v0.0.0-20240117232118-fd78256df076 + github.com/celestiaorg/go-square/v3 v3.0.2 github.com/filecoin-project/go-jsonrpc v0.9.0 github.com/rs/zerolog v1.34.0 github.com/stretchr/testify v1.11.1 ) require ( + github.com/celestiaorg/nmt v0.24.2 // indirect github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc // indirect + github.com/gogo/protobuf v1.3.2 // indirect github.com/golang/groupcache v0.0.0-20241129210726-2c02b8208cf8 // indirect github.com/google/go-cmp v0.7.0 // indirect github.com/google/uuid v1.6.0 // indirect @@ -24,8 +28,11 @@ require ( go.opencensus.io v0.24.0 // indirect go.uber.org/multierr v1.11.0 // indirect go.uber.org/zap v1.27.0 // indirect + golang.org/x/exp v0.0.0-20250911091902-df9299821621 // indirect + golang.org/x/sync v0.17.0 // indirect golang.org/x/sys v0.31.0 // indirect golang.org/x/xerrors v0.0.0-20240903120638-7835f813f4da // indirect + google.golang.org/protobuf v1.36.9 // indirect gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c // indirect gopkg.in/yaml.v3 v3.0.1 // indirect ) diff --git a/da/go.sum b/da/go.sum index 2194a6b7c3..80bbfb1159 100644 --- a/da/go.sum +++ b/da/go.sum @@ -1,5 +1,11 @@ cloud.google.com/go v0.26.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU= +github.com/celestiaorg/go-square/merkle v0.0.0-20240117232118-fd78256df076 h1:PYInrsYzrDIsZW9Yb86OTi2aEKuPcpgJt6Mc0Jlc/yg= +github.com/celestiaorg/go-square/merkle v0.0.0-20240117232118-fd78256df076/go.mod h1:hlidgivKyvv7m4Yl2Fdf2mSTmazZYxX8+bnr5IQrI98= +github.com/celestiaorg/go-square/v3 v3.0.2 h1:eSQOgNII8inK9IhiBZ+6GADQeWbRq4HYY72BOgcduA4= +github.com/celestiaorg/go-square/v3 v3.0.2/go.mod h1:oFReMLsSDMRs82ICFEeFQFCqNvwdsbIM1BzCcb0f7dM= +github.com/celestiaorg/nmt v0.24.2 h1:LlpJSPOd6/Lw1Ig6HUhZuqiINHLka/ZSRTBzlNJpchg= +github.com/celestiaorg/nmt v0.24.2/go.mod h1:vgLBpWBi8F5KLxTdXSwb7AU4NhiIQ1AQRGa+PzdcLEA= github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU= github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw= github.com/cncf/udpa/go v0.0.0-20191209042840-269d4d468f6f/go.mod h1:M8M6+tZqaGXZJjfX53e64911xZQV5JYwmTeXPW+k8Sc= @@ -16,6 +22,8 @@ github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7 github.com/filecoin-project/go-jsonrpc v0.9.0 h1:G47qEF52w7GholpI21vPSTVBFvsrip6geIoqNiqyZtQ= github.com/filecoin-project/go-jsonrpc v0.9.0/go.mod h1:OG7kVBVh/AbDFHIwx7Kw0l9ARmKOS6gGOr0LbdBpbLc= github.com/godbus/dbus/v5 v5.0.4/go.mod h1:xhWf0FNVPg57R7Z0UbKHbJfkEywrmjJnf7w5xrFpKfA= +github.com/gogo/protobuf v1.3.2 h1:Ov1cvc58UF3b5XjBnZv7+opcTcQFZebYjWzi34vdm4Q= +github.com/gogo/protobuf v1.3.2/go.mod h1:P1XiOD3dCwIKUDQYPy72D8LYyHL2YPYrpS2s69NZV8Q= github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q= github.com/golang/groupcache v0.0.0-20200121045136-8c9f03a8e57e/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= github.com/golang/groupcache v0.0.0-20241129210726-2c02b8208cf8 h1:f+oWsMOmNPc8JmEHVZIycC7hBoQxHH9pNKQORJNozsQ= @@ -38,6 +46,8 @@ github.com/google/go-cmp v0.5.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/ github.com/google/go-cmp v0.5.3/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/go-cmp v0.7.0 h1:wk8382ETsv4JYUZwIsn6YpYiWiBsYLSJiTsyBybVuN8= github.com/google/go-cmp v0.7.0/go.mod h1:pXiqmnSA92OHEEa9HXL2W4E7lf9JzCmGVUdgjX3N/iU= +github.com/google/gofuzz v1.2.0 h1:xRy4A+RhZaiKjJ1bPfwQ8sedCA+YS2YcCHW6ec7JMi0= +github.com/google/gofuzz v1.2.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= github.com/google/renameio v0.1.0/go.mod h1:KWCgfxg9yswjAJkECMjeO8J8rahYeXnNhOm40UhjYkI= github.com/google/uuid v1.1.2/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= github.com/google/uuid v1.6.0 h1:NIvaJDMOsjHA8n1jAhLSgzrAzy1Hgr+hNrb57e+94F0= @@ -46,6 +56,7 @@ github.com/gorilla/websocket v1.5.3 h1:saDtZ6Pbx/0u+bgYQ3q96pZgCzfhKXGPqt7kZ72aN github.com/gorilla/websocket v1.5.3/go.mod h1:YR8l580nyteQvAITg2hZ9XVh4b55+EU/adAjf1fMHhE= github.com/ipfs/go-log/v2 v2.0.8 h1:3b3YNopMHlj4AvyhWAx0pDxqSQWYi4/WuWO7yRV6/Qg= github.com/ipfs/go-log/v2 v2.0.8/go.mod h1:eZs4Xt4ZUJQFM3DlanGhy7TkwwawCZcSByscwkWG+dw= +github.com/kisielk/errcheck v1.5.0/go.mod h1:pFxgyoBC7bSaBwPgfKdkLd5X25qrDl4LWUI2bnpBCr8= github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck= github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo= github.com/kr/pretty v0.2.1/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI= @@ -87,6 +98,14 @@ github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO github.com/stretchr/testify v1.8.1/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4= github.com/stretchr/testify v1.11.1 h1:7s2iGBzp5EwR7/aIZr8ao5+dra3wiQyKjjFuvgVKu7U= github.com/stretchr/testify v1.11.1/go.mod h1:wZwfW3scLgRK+23gO65QZefKpKQRnfz6sD981Nm4B6U= +github.com/tidwall/gjson v1.18.0 h1:FIDeeyB800efLX89e5a8Y0BNH+LOngJyGrIWxG2FKQY= +github.com/tidwall/gjson v1.18.0/go.mod h1:/wbyibRr2FHMks5tjHJ5F8dMZh3AcwJEMf5vlfC0lxk= +github.com/tidwall/match v1.1.1 h1:+Ho715JplO36QYgwN9PGYNhgZvoUSc9X2c80KVTi+GA= +github.com/tidwall/match v1.1.1/go.mod h1:eRSPERbgtNPcGhD8UCthc6PmLEQXEWd3PRB5JTxsfmM= +github.com/tidwall/pretty v1.2.1 h1:qjsOFOWWQl+N3RsoF5/ssm1pHmJJwhjlSbZ51I6wMl4= +github.com/tidwall/pretty v1.2.1/go.mod h1:ITEVvHYasfjBbM0u2Pg8T2nJnzm8xPwvNhhsoaGGjNU= +github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= +github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= go.opencensus.io v0.24.0 h1:y73uSU6J157QMP2kn2r30vwW1A2W2WFwSCGnAVxeaD0= go.opencensus.io v0.24.0/go.mod h1:vNK8G9p7aAivkbmorf4v+7Hgx+Zs0yY+0fOtgBfjQKo= go.uber.org/atomic v1.6.0/go.mod h1:sABNBOSYdrvTF6hTgEIbc7YasKWGhgEQZyfxyTvoXHQ= @@ -101,24 +120,35 @@ go.uber.org/zap v1.27.0 h1:aJMhYGrd5QSmlpLMr2MftRKl7t8J8PTZPA732ud/XR8= go.uber.org/zap v1.27.0/go.mod h1:GB2qFLM7cTU87MWRP2mPIjqfIDnGu+VIO4V/SdhGo2E= golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= golang.org/x/crypto v0.0.0-20190510104115-cbcb75029529/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= +golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= +golang.org/x/exp v0.0.0-20250911091902-df9299821621 h1:2id6c1/gto0kaHYyrixvknJ8tUK/Qs5IsmBtrc+FtgU= +golang.org/x/exp v0.0.0-20250911091902-df9299821621/go.mod h1:TwQYMMnGpvZyc+JpB/UAuTNIsVJifOlSkrZkhcvpVUk= golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= golang.org/x/lint v0.0.0-20190227174305-5b3e6a55c961/go.mod h1:wehouNa3lNwaWXcvxsM5YxQ5yQlVC4a0KAMCusXpPoU= golang.org/x/lint v0.0.0-20190313153728-d0100b6bd8b3/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= golang.org/x/lint v0.0.0-20190930215403-16217165b5de/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= golang.org/x/mod v0.0.0-20190513183733-4bf6d317e70e/go.mod h1:mXi4GBBbnImb6dmsKGUJ2LatrhH/nqhxcFungHvyanc= +golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= +golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20190213061140-3a22650c66bd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20200226121028-0de0cce0169b/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= golang.org/x/net v0.0.0-20201110031124-69a78807bb2b/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.17.0 h1:l60nONMj9l5drqw6jlhIELNv9I0A4OFgRsG9k2oT9Ug= +golang.org/x/sync v0.17.0/go.mod h1:9KTHXmSnoGruLpwFjVSX0lNNA75CykiMECbovNTZqGI= golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= @@ -138,8 +168,13 @@ golang.org/x/tools v0.0.0-20190524140312-2c0ae7006135/go.mod h1:RgjU9mgBXZiqYHBn golang.org/x/tools v0.0.0-20190621195816-6e04913cbbac/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= golang.org/x/tools v0.0.0-20191029041327-9cc4af7d6b2c/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20191029190741-b9c20aec41a5/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20200619180055-7c47624df98f/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= +golang.org/x/tools v0.0.0-20210106214847-113979e3529a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20240903120638-7835f813f4da h1:noIWHXmPHxILtqtCOPIhSt0ABwskkZKjD3bXGnZGpNY= golang.org/x/xerrors v0.0.0-20240903120638-7835f813f4da/go.mod h1:NDW/Ps6MPRej6fsCIbMTohpP40sJ/P/vI1MoTEGwX90= google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM= @@ -161,6 +196,8 @@ google.golang.org/protobuf v1.22.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2 google.golang.org/protobuf v1.23.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= google.golang.org/protobuf v1.23.1-0.20200526195155-81db48ad09cc/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= google.golang.org/protobuf v1.25.0/go.mod h1:9JNX74DMeImyA3h4bdi1ymwjUzf21/xIlbajtzgsN7c= +google.golang.org/protobuf v1.36.9 h1:w2gp2mA27hUeUzj9Ex9FBjsBm40zfaDtEWow293U7Iw= +google.golang.org/protobuf v1.36.9/go.mod h1:fuxRtAxBytpl4zzqUh6/eyUujkJdNiuEkXntxiD/uRU= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk= From 0e8ff949e32c7abc2bc14100b4eb605046aa50b0 Mon Sep 17 00:00:00 2001 From: Randy Grok Date: Thu, 27 Nov 2025 13:12:26 +0100 Subject: [PATCH 26/35] make tidy all --- apps/evm/go.mod | 2 ++ apps/evm/go.sum | 12 ++++++++++++ apps/grpc/go.mod | 2 ++ apps/grpc/go.sum | 12 ++++++++++++ apps/testapp/go.mod | 2 ++ apps/testapp/go.sum | 12 ++++++++++++ tools/da-debug/go.mod | 6 +++++- tools/da-debug/go.sum | 18 ++++++++++++++++-- 8 files changed, 63 insertions(+), 3 deletions(-) diff --git a/apps/evm/go.mod b/apps/evm/go.mod index c801f095c7..6c05cb0823 100644 --- a/apps/evm/go.mod +++ b/apps/evm/go.mod @@ -30,7 +30,9 @@ require ( github.com/beorn7/perks v1.0.1 // indirect github.com/bits-and-blooms/bitset v1.20.0 // indirect github.com/celestiaorg/go-libp2p-messenger v0.2.2 // indirect + github.com/celestiaorg/go-square/merkle v0.0.0-20240117232118-fd78256df076 // indirect github.com/celestiaorg/go-square/v3 v3.0.2 // indirect + github.com/celestiaorg/nmt v0.24.2 // indirect github.com/cespare/xxhash/v2 v2.3.0 // indirect github.com/consensys/gnark-crypto v0.18.1 // indirect github.com/crate-crypto/go-eth-kzg v1.4.0 // indirect diff --git a/apps/evm/go.sum b/apps/evm/go.sum index 173fb3ba0d..702caaa30f 100644 --- a/apps/evm/go.sum +++ b/apps/evm/go.sum @@ -34,8 +34,12 @@ github.com/bradfitz/go-smtpd v0.0.0-20170404230938-deb6d6237625/go.mod h1:HYsPBT github.com/buger/jsonparser v0.0.0-20181115193947-bf1c66bbce23/go.mod h1:bbYlZJ7hK1yFx9hf58LP0zeX7UjIGs20ufpu3evjr+s= github.com/celestiaorg/go-libp2p-messenger v0.2.2 h1:osoUfqjss7vWTIZrrDSy953RjQz+ps/vBFE7bychLEc= github.com/celestiaorg/go-libp2p-messenger v0.2.2/go.mod h1:oTCRV5TfdO7V/k6nkx7QjQzGrWuJbupv+0o1cgnY2i4= +github.com/celestiaorg/go-square/merkle v0.0.0-20240117232118-fd78256df076 h1:PYInrsYzrDIsZW9Yb86OTi2aEKuPcpgJt6Mc0Jlc/yg= +github.com/celestiaorg/go-square/merkle v0.0.0-20240117232118-fd78256df076/go.mod h1:hlidgivKyvv7m4Yl2Fdf2mSTmazZYxX8+bnr5IQrI98= github.com/celestiaorg/go-square/v3 v3.0.2 h1:eSQOgNII8inK9IhiBZ+6GADQeWbRq4HYY72BOgcduA4= github.com/celestiaorg/go-square/v3 v3.0.2/go.mod h1:oFReMLsSDMRs82ICFEeFQFCqNvwdsbIM1BzCcb0f7dM= +github.com/celestiaorg/nmt v0.24.2 h1:LlpJSPOd6/Lw1Ig6HUhZuqiINHLka/ZSRTBzlNJpchg= +github.com/celestiaorg/nmt v0.24.2/go.mod h1:vgLBpWBi8F5KLxTdXSwb7AU4NhiIQ1AQRGa+PzdcLEA= github.com/celestiaorg/utils v0.1.0 h1:WsP3O8jF7jKRgLNFmlDCwdThwOFMFxg0MnqhkLFVxPo= github.com/celestiaorg/utils v0.1.0/go.mod h1:vQTh7MHnvpIeCQZ2/Ph+w7K1R2UerDheZbgJEJD2hSU= github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU= @@ -183,6 +187,8 @@ github.com/google/go-cmp v0.7.0 h1:wk8382ETsv4JYUZwIsn6YpYiWiBsYLSJiTsyBybVuN8= github.com/google/go-cmp v0.7.0/go.mod h1:pXiqmnSA92OHEEa9HXL2W4E7lf9JzCmGVUdgjX3N/iU= github.com/google/go-github v17.0.0+incompatible/go.mod h1:zLgOLi98H3fifZn+44m+umXrS52loVEgC2AApnigrVQ= github.com/google/go-querystring v1.0.0/go.mod h1:odCYkC5MyYFN7vkCjXpyrEuKhc/BUO6wN/zVPAxq5ck= +github.com/google/gofuzz v1.2.0 h1:xRy4A+RhZaiKjJ1bPfwQ8sedCA+YS2YcCHW6ec7JMi0= +github.com/google/gofuzz v1.2.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= github.com/google/gopacket v1.1.19 h1:ves8RnFZPGiFnTS0uPQStjwru6uO6h+nlr9j6fL7kF8= github.com/google/gopacket v1.1.19/go.mod h1:iJ8V8n6KS+z2U1A8pUwu8bW5SyEMkXJB8Yo/Vo+TKTo= github.com/google/martian v2.1.0+incompatible/go.mod h1:9I4somxYTbIHy5NJKHRl3wXiIaQGbYVAs8BPL6v8lEs= @@ -522,6 +528,12 @@ github.com/supranational/blst v0.3.16-0.20250831170142-f48500c1fdbe/go.mod h1:jZ github.com/syndtr/goleveldb v1.0.1-0.20220721030215-126854af5e6d h1:vfofYNRScrDdvS342BElfbETmL1Aiz3i2t0zfRj16Hs= github.com/syndtr/goleveldb v1.0.1-0.20220721030215-126854af5e6d/go.mod h1:RRCYJbIwD5jmqPI9XoAFR0OcDxqUctll6zUj/+B4S48= github.com/tarm/serial v0.0.0-20180830185346-98f6abe2eb07/go.mod h1:kDXzergiv9cbyO7IOYJZWg1U88JhDg3PB6klq9Hg2pA= +github.com/tidwall/gjson v1.18.0 h1:FIDeeyB800efLX89e5a8Y0BNH+LOngJyGrIWxG2FKQY= +github.com/tidwall/gjson v1.18.0/go.mod h1:/wbyibRr2FHMks5tjHJ5F8dMZh3AcwJEMf5vlfC0lxk= +github.com/tidwall/match v1.1.1 h1:+Ho715JplO36QYgwN9PGYNhgZvoUSc9X2c80KVTi+GA= +github.com/tidwall/match v1.1.1/go.mod h1:eRSPERbgtNPcGhD8UCthc6PmLEQXEWd3PRB5JTxsfmM= +github.com/tidwall/pretty v1.2.1 h1:qjsOFOWWQl+N3RsoF5/ssm1pHmJJwhjlSbZ51I6wMl4= +github.com/tidwall/pretty v1.2.1/go.mod h1:ITEVvHYasfjBbM0u2Pg8T2nJnzm8xPwvNhhsoaGGjNU= github.com/tklauser/go-sysconf v0.3.12 h1:0QaGUFOdQaIVdPgfITYzaTegZvdCjmYO52cSFAEVmqU= github.com/tklauser/go-sysconf v0.3.12/go.mod h1:Ho14jnntGE1fpdOqQEEaiKRpvIavV0hSfmBq8nJbHYI= github.com/tklauser/numcpus v0.6.1 h1:ng9scYS7az0Bk4OZLvrNXNSAO2Pxr1XXRAPyjhIx+Fk= diff --git a/apps/grpc/go.mod b/apps/grpc/go.mod index a829f366b4..7b195a4aa8 100644 --- a/apps/grpc/go.mod +++ b/apps/grpc/go.mod @@ -26,7 +26,9 @@ require ( github.com/beorn7/perks v1.0.1 // indirect github.com/celestiaorg/go-header v0.7.4 // indirect github.com/celestiaorg/go-libp2p-messenger v0.2.2 // indirect + github.com/celestiaorg/go-square/merkle v0.0.0-20240117232118-fd78256df076 // indirect github.com/celestiaorg/go-square/v3 v3.0.2 // indirect + github.com/celestiaorg/nmt v0.24.2 // indirect github.com/cespare/xxhash/v2 v2.3.0 // indirect github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc // indirect github.com/davidlazar/go-crypto v0.0.0-20200604182044-b73af7476f6c // indirect diff --git a/apps/grpc/go.sum b/apps/grpc/go.sum index c2e0e46a7d..d49ddfccbd 100644 --- a/apps/grpc/go.sum +++ b/apps/grpc/go.sum @@ -24,8 +24,12 @@ github.com/bradfitz/go-smtpd v0.0.0-20170404230938-deb6d6237625/go.mod h1:HYsPBT github.com/buger/jsonparser v0.0.0-20181115193947-bf1c66bbce23/go.mod h1:bbYlZJ7hK1yFx9hf58LP0zeX7UjIGs20ufpu3evjr+s= github.com/celestiaorg/go-libp2p-messenger v0.2.2 h1:osoUfqjss7vWTIZrrDSy953RjQz+ps/vBFE7bychLEc= github.com/celestiaorg/go-libp2p-messenger v0.2.2/go.mod h1:oTCRV5TfdO7V/k6nkx7QjQzGrWuJbupv+0o1cgnY2i4= +github.com/celestiaorg/go-square/merkle v0.0.0-20240117232118-fd78256df076 h1:PYInrsYzrDIsZW9Yb86OTi2aEKuPcpgJt6Mc0Jlc/yg= +github.com/celestiaorg/go-square/merkle v0.0.0-20240117232118-fd78256df076/go.mod h1:hlidgivKyvv7m4Yl2Fdf2mSTmazZYxX8+bnr5IQrI98= github.com/celestiaorg/go-square/v3 v3.0.2 h1:eSQOgNII8inK9IhiBZ+6GADQeWbRq4HYY72BOgcduA4= github.com/celestiaorg/go-square/v3 v3.0.2/go.mod h1:oFReMLsSDMRs82ICFEeFQFCqNvwdsbIM1BzCcb0f7dM= +github.com/celestiaorg/nmt v0.24.2 h1:LlpJSPOd6/Lw1Ig6HUhZuqiINHLka/ZSRTBzlNJpchg= +github.com/celestiaorg/nmt v0.24.2/go.mod h1:vgLBpWBi8F5KLxTdXSwb7AU4NhiIQ1AQRGa+PzdcLEA= github.com/celestiaorg/utils v0.1.0 h1:WsP3O8jF7jKRgLNFmlDCwdThwOFMFxg0MnqhkLFVxPo= github.com/celestiaorg/utils v0.1.0/go.mod h1:vQTh7MHnvpIeCQZ2/Ph+w7K1R2UerDheZbgJEJD2hSU= github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU= @@ -123,6 +127,8 @@ github.com/google/go-cmp v0.7.0 h1:wk8382ETsv4JYUZwIsn6YpYiWiBsYLSJiTsyBybVuN8= github.com/google/go-cmp v0.7.0/go.mod h1:pXiqmnSA92OHEEa9HXL2W4E7lf9JzCmGVUdgjX3N/iU= github.com/google/go-github v17.0.0+incompatible/go.mod h1:zLgOLi98H3fifZn+44m+umXrS52loVEgC2AApnigrVQ= github.com/google/go-querystring v1.0.0/go.mod h1:odCYkC5MyYFN7vkCjXpyrEuKhc/BUO6wN/zVPAxq5ck= +github.com/google/gofuzz v1.2.0 h1:xRy4A+RhZaiKjJ1bPfwQ8sedCA+YS2YcCHW6ec7JMi0= +github.com/google/gofuzz v1.2.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= github.com/google/gopacket v1.1.19 h1:ves8RnFZPGiFnTS0uPQStjwru6uO6h+nlr9j6fL7kF8= github.com/google/gopacket v1.1.19/go.mod h1:iJ8V8n6KS+z2U1A8pUwu8bW5SyEMkXJB8Yo/Vo+TKTo= github.com/google/martian v2.1.0+incompatible/go.mod h1:9I4somxYTbIHy5NJKHRl3wXiIaQGbYVAs8BPL6v8lEs= @@ -429,6 +435,12 @@ github.com/stretchr/testify v1.11.1/go.mod h1:wZwfW3scLgRK+23gO65QZefKpKQRnfz6sD github.com/subosito/gotenv v1.6.0 h1:9NlTDc1FTs4qu0DDq7AEtTPNw6SVm7uBMsUCUjABIf8= github.com/subosito/gotenv v1.6.0/go.mod h1:Dk4QP5c2W3ibzajGcXpNraDfq2IrhjMIvMSWPKKo0FU= github.com/tarm/serial v0.0.0-20180830185346-98f6abe2eb07/go.mod h1:kDXzergiv9cbyO7IOYJZWg1U88JhDg3PB6klq9Hg2pA= +github.com/tidwall/gjson v1.18.0 h1:FIDeeyB800efLX89e5a8Y0BNH+LOngJyGrIWxG2FKQY= +github.com/tidwall/gjson v1.18.0/go.mod h1:/wbyibRr2FHMks5tjHJ5F8dMZh3AcwJEMf5vlfC0lxk= +github.com/tidwall/match v1.1.1 h1:+Ho715JplO36QYgwN9PGYNhgZvoUSc9X2c80KVTi+GA= +github.com/tidwall/match v1.1.1/go.mod h1:eRSPERbgtNPcGhD8UCthc6PmLEQXEWd3PRB5JTxsfmM= +github.com/tidwall/pretty v1.2.1 h1:qjsOFOWWQl+N3RsoF5/ssm1pHmJJwhjlSbZ51I6wMl4= +github.com/tidwall/pretty v1.2.1/go.mod h1:ITEVvHYasfjBbM0u2Pg8T2nJnzm8xPwvNhhsoaGGjNU= github.com/urfave/cli v1.22.10/go.mod h1:Gos4lmkARVdJ6EkW0WaNv/tZAAMe9V7XWyB60NtXRu0= github.com/viant/assertly v0.4.8/go.mod h1:aGifi++jvCrUaklKEKT0BU95igDNaqkvz+49uaYMPRU= github.com/viant/toolbox v0.24.0/go.mod h1:OxMCG57V0PXuIP2HNQrtJf2CjqdmbrOx5EkMILuUhzM= diff --git a/apps/testapp/go.mod b/apps/testapp/go.mod index f15d133d6f..bdb2519c9d 100644 --- a/apps/testapp/go.mod +++ b/apps/testapp/go.mod @@ -25,7 +25,9 @@ require ( github.com/benbjohnson/clock v1.3.5 // indirect github.com/beorn7/perks v1.0.1 // indirect github.com/celestiaorg/go-libp2p-messenger v0.2.2 // indirect + github.com/celestiaorg/go-square/merkle v0.0.0-20240117232118-fd78256df076 // indirect github.com/celestiaorg/go-square/v3 v3.0.2 // indirect + github.com/celestiaorg/nmt v0.24.2 // indirect github.com/cespare/xxhash/v2 v2.3.0 // indirect github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc // indirect github.com/davidlazar/go-crypto v0.0.0-20200604182044-b73af7476f6c // indirect diff --git a/apps/testapp/go.sum b/apps/testapp/go.sum index eeafba1a84..6fbd5b9d26 100644 --- a/apps/testapp/go.sum +++ b/apps/testapp/go.sum @@ -24,8 +24,12 @@ github.com/bradfitz/go-smtpd v0.0.0-20170404230938-deb6d6237625/go.mod h1:HYsPBT github.com/buger/jsonparser v0.0.0-20181115193947-bf1c66bbce23/go.mod h1:bbYlZJ7hK1yFx9hf58LP0zeX7UjIGs20ufpu3evjr+s= github.com/celestiaorg/go-libp2p-messenger v0.2.2 h1:osoUfqjss7vWTIZrrDSy953RjQz+ps/vBFE7bychLEc= github.com/celestiaorg/go-libp2p-messenger v0.2.2/go.mod h1:oTCRV5TfdO7V/k6nkx7QjQzGrWuJbupv+0o1cgnY2i4= +github.com/celestiaorg/go-square/merkle v0.0.0-20240117232118-fd78256df076 h1:PYInrsYzrDIsZW9Yb86OTi2aEKuPcpgJt6Mc0Jlc/yg= +github.com/celestiaorg/go-square/merkle v0.0.0-20240117232118-fd78256df076/go.mod h1:hlidgivKyvv7m4Yl2Fdf2mSTmazZYxX8+bnr5IQrI98= github.com/celestiaorg/go-square/v3 v3.0.2 h1:eSQOgNII8inK9IhiBZ+6GADQeWbRq4HYY72BOgcduA4= github.com/celestiaorg/go-square/v3 v3.0.2/go.mod h1:oFReMLsSDMRs82ICFEeFQFCqNvwdsbIM1BzCcb0f7dM= +github.com/celestiaorg/nmt v0.24.2 h1:LlpJSPOd6/Lw1Ig6HUhZuqiINHLka/ZSRTBzlNJpchg= +github.com/celestiaorg/nmt v0.24.2/go.mod h1:vgLBpWBi8F5KLxTdXSwb7AU4NhiIQ1AQRGa+PzdcLEA= github.com/celestiaorg/utils v0.1.0 h1:WsP3O8jF7jKRgLNFmlDCwdThwOFMFxg0MnqhkLFVxPo= github.com/celestiaorg/utils v0.1.0/go.mod h1:vQTh7MHnvpIeCQZ2/Ph+w7K1R2UerDheZbgJEJD2hSU= github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU= @@ -123,6 +127,8 @@ github.com/google/go-cmp v0.7.0 h1:wk8382ETsv4JYUZwIsn6YpYiWiBsYLSJiTsyBybVuN8= github.com/google/go-cmp v0.7.0/go.mod h1:pXiqmnSA92OHEEa9HXL2W4E7lf9JzCmGVUdgjX3N/iU= github.com/google/go-github v17.0.0+incompatible/go.mod h1:zLgOLi98H3fifZn+44m+umXrS52loVEgC2AApnigrVQ= github.com/google/go-querystring v1.0.0/go.mod h1:odCYkC5MyYFN7vkCjXpyrEuKhc/BUO6wN/zVPAxq5ck= +github.com/google/gofuzz v1.2.0 h1:xRy4A+RhZaiKjJ1bPfwQ8sedCA+YS2YcCHW6ec7JMi0= +github.com/google/gofuzz v1.2.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= github.com/google/gopacket v1.1.19 h1:ves8RnFZPGiFnTS0uPQStjwru6uO6h+nlr9j6fL7kF8= github.com/google/gopacket v1.1.19/go.mod h1:iJ8V8n6KS+z2U1A8pUwu8bW5SyEMkXJB8Yo/Vo+TKTo= github.com/google/martian v2.1.0+incompatible/go.mod h1:9I4somxYTbIHy5NJKHRl3wXiIaQGbYVAs8BPL6v8lEs= @@ -428,6 +434,12 @@ github.com/stretchr/testify v1.11.1/go.mod h1:wZwfW3scLgRK+23gO65QZefKpKQRnfz6sD github.com/subosito/gotenv v1.6.0 h1:9NlTDc1FTs4qu0DDq7AEtTPNw6SVm7uBMsUCUjABIf8= github.com/subosito/gotenv v1.6.0/go.mod h1:Dk4QP5c2W3ibzajGcXpNraDfq2IrhjMIvMSWPKKo0FU= github.com/tarm/serial v0.0.0-20180830185346-98f6abe2eb07/go.mod h1:kDXzergiv9cbyO7IOYJZWg1U88JhDg3PB6klq9Hg2pA= +github.com/tidwall/gjson v1.18.0 h1:FIDeeyB800efLX89e5a8Y0BNH+LOngJyGrIWxG2FKQY= +github.com/tidwall/gjson v1.18.0/go.mod h1:/wbyibRr2FHMks5tjHJ5F8dMZh3AcwJEMf5vlfC0lxk= +github.com/tidwall/match v1.1.1 h1:+Ho715JplO36QYgwN9PGYNhgZvoUSc9X2c80KVTi+GA= +github.com/tidwall/match v1.1.1/go.mod h1:eRSPERbgtNPcGhD8UCthc6PmLEQXEWd3PRB5JTxsfmM= +github.com/tidwall/pretty v1.2.1 h1:qjsOFOWWQl+N3RsoF5/ssm1pHmJJwhjlSbZ51I6wMl4= +github.com/tidwall/pretty v1.2.1/go.mod h1:ITEVvHYasfjBbM0u2Pg8T2nJnzm8xPwvNhhsoaGGjNU= github.com/urfave/cli v1.22.10/go.mod h1:Gos4lmkARVdJ6EkW0WaNv/tZAAMe9V7XWyB60NtXRu0= github.com/viant/assertly v0.4.8/go.mod h1:aGifi++jvCrUaklKEKT0BU95igDNaqkvz+49uaYMPRU= github.com/viant/toolbox v0.24.0/go.mod h1:OxMCG57V0PXuIP2HNQrtJf2CjqdmbrOx5EkMILuUhzM= diff --git a/tools/da-debug/go.mod b/tools/da-debug/go.mod index 701ac2e0ad..b661709f24 100644 --- a/tools/da-debug/go.mod +++ b/tools/da-debug/go.mod @@ -11,7 +11,10 @@ require ( ) require ( - github.com/celestiaorg/go-header v0.7.3 // indirect + github.com/celestiaorg/go-header v0.7.4 // indirect + github.com/celestiaorg/go-square/merkle v0.0.0-20240117232118-fd78256df076 // indirect + github.com/celestiaorg/go-square/v3 v3.0.2 // indirect + github.com/celestiaorg/nmt v0.24.2 // indirect github.com/decred/dcrd/dcrec/secp256k1/v4 v4.4.0 // indirect github.com/filecoin-project/go-jsonrpc v0.9.0 // indirect github.com/gogo/protobuf v1.3.2 // indirect @@ -47,6 +50,7 @@ require ( go.uber.org/zap v1.27.0 // indirect golang.org/x/crypto v0.45.0 // indirect golang.org/x/exp v0.0.0-20250911091902-df9299821621 // indirect + golang.org/x/sync v0.18.0 // indirect golang.org/x/sys v0.38.0 // indirect golang.org/x/xerrors v0.0.0-20240903120638-7835f813f4da // indirect lukechampine.com/blake3 v1.4.1 // indirect diff --git a/tools/da-debug/go.sum b/tools/da-debug/go.sum index 92401272ae..a78ab21bf0 100644 --- a/tools/da-debug/go.sum +++ b/tools/da-debug/go.sum @@ -4,8 +4,14 @@ github.com/benbjohnson/clock v1.3.5 h1:VvXlSJBzZpA/zum6Sj74hxwYI2DIxRWuNIoXAzHZz github.com/benbjohnson/clock v1.3.5/go.mod h1:J11/hYXuz8f4ySSvYwY0FKfm+ezbsZBKZxNJlLklBHA= github.com/beorn7/perks v1.0.1 h1:VlbKKnNfV8bJzeqoa4cOKqO6bYr3WgKZxO8Z16+hsOM= github.com/beorn7/perks v1.0.1/go.mod h1:G2ZrVWU2WbWT9wwq4/hrbKbnv/1ERSJQ0ibhJ6rlkpw= -github.com/celestiaorg/go-header v0.7.3 h1:3+kIa+YXT789gPGRh3a55qmdYq3yTTBIqTyum26AvN0= -github.com/celestiaorg/go-header v0.7.3/go.mod h1:eX9iTSPthVEAlEDLux40ZT/olXPGhpxHd+mEzJeDhd0= +github.com/celestiaorg/go-header v0.7.4 h1:kQx3bVvKV+H2etxRi4IUuby5VQydBONx3giHFXDcZ/o= +github.com/celestiaorg/go-header v0.7.4/go.mod h1:eX9iTSPthVEAlEDLux40ZT/olXPGhpxHd+mEzJeDhd0= +github.com/celestiaorg/go-square/merkle v0.0.0-20240117232118-fd78256df076 h1:PYInrsYzrDIsZW9Yb86OTi2aEKuPcpgJt6Mc0Jlc/yg= +github.com/celestiaorg/go-square/merkle v0.0.0-20240117232118-fd78256df076/go.mod h1:hlidgivKyvv7m4Yl2Fdf2mSTmazZYxX8+bnr5IQrI98= +github.com/celestiaorg/go-square/v3 v3.0.2 h1:eSQOgNII8inK9IhiBZ+6GADQeWbRq4HYY72BOgcduA4= +github.com/celestiaorg/go-square/v3 v3.0.2/go.mod h1:oFReMLsSDMRs82ICFEeFQFCqNvwdsbIM1BzCcb0f7dM= +github.com/celestiaorg/nmt v0.24.2 h1:LlpJSPOd6/Lw1Ig6HUhZuqiINHLka/ZSRTBzlNJpchg= +github.com/celestiaorg/nmt v0.24.2/go.mod h1:vgLBpWBi8F5KLxTdXSwb7AU4NhiIQ1AQRGa+PzdcLEA= github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU= github.com/cespare/xxhash/v2 v2.3.0 h1:UL815xU9SqsFlibzuggzjXhog7bL6oX9BbNZnL2UFvs= github.com/cespare/xxhash/v2 v2.3.0/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= @@ -60,6 +66,8 @@ github.com/google/go-cmp v0.5.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/ github.com/google/go-cmp v0.5.3/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/go-cmp v0.7.0 h1:wk8382ETsv4JYUZwIsn6YpYiWiBsYLSJiTsyBybVuN8= github.com/google/go-cmp v0.7.0/go.mod h1:pXiqmnSA92OHEEa9HXL2W4E7lf9JzCmGVUdgjX3N/iU= +github.com/google/gofuzz v1.2.0 h1:xRy4A+RhZaiKjJ1bPfwQ8sedCA+YS2YcCHW6ec7JMi0= +github.com/google/gofuzz v1.2.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= github.com/google/gopacket v1.1.19 h1:ves8RnFZPGiFnTS0uPQStjwru6uO6h+nlr9j6fL7kF8= github.com/google/gopacket v1.1.19/go.mod h1:iJ8V8n6KS+z2U1A8pUwu8bW5SyEMkXJB8Yo/Vo+TKTo= github.com/google/uuid v1.1.2/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= @@ -233,6 +241,12 @@ github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO github.com/stretchr/testify v1.8.1/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4= github.com/stretchr/testify v1.11.1 h1:7s2iGBzp5EwR7/aIZr8ao5+dra3wiQyKjjFuvgVKu7U= github.com/stretchr/testify v1.11.1/go.mod h1:wZwfW3scLgRK+23gO65QZefKpKQRnfz6sD981Nm4B6U= +github.com/tidwall/gjson v1.18.0 h1:FIDeeyB800efLX89e5a8Y0BNH+LOngJyGrIWxG2FKQY= +github.com/tidwall/gjson v1.18.0/go.mod h1:/wbyibRr2FHMks5tjHJ5F8dMZh3AcwJEMf5vlfC0lxk= +github.com/tidwall/match v1.1.1 h1:+Ho715JplO36QYgwN9PGYNhgZvoUSc9X2c80KVTi+GA= +github.com/tidwall/match v1.1.1/go.mod h1:eRSPERbgtNPcGhD8UCthc6PmLEQXEWd3PRB5JTxsfmM= +github.com/tidwall/pretty v1.2.1 h1:qjsOFOWWQl+N3RsoF5/ssm1pHmJJwhjlSbZ51I6wMl4= +github.com/tidwall/pretty v1.2.1/go.mod h1:ITEVvHYasfjBbM0u2Pg8T2nJnzm8xPwvNhhsoaGGjNU= github.com/wlynxg/anet v0.0.5 h1:J3VJGi1gvo0JwZ/P1/Yc/8p63SoW98B5dHkYDmpgvvU= github.com/wlynxg/anet v0.0.5/go.mod h1:eay5PRQr7fIVAMbTbchTnO9gG65Hg/uYGdc7mguHxoA= github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= From ae6b174b4c0fd507b3db177cefdf2c581a3c6203 Mon Sep 17 00:00:00 2001 From: Randy Grok Date: Thu, 27 Nov 2025 14:33:04 +0100 Subject: [PATCH 27/35] clean a lot the client --- block/internal/da/client.go | 264 ---------- block/internal/da/client_test.go | 458 ------------------ block/internal/submitting/da_submitter.go | 102 +--- .../submitting/da_submitter_mocks_test.go | 57 ++- block/internal/syncing/da_retriever.go | 108 +---- block/internal/syncing/da_retriever_test.go | 93 ++-- block/public.go | 24 - da/celestia/client.go | 284 +++++++---- da/cmd/local-da/local.go | 220 +++++---- da/cmd/local-da/server.go | 26 +- da/da.go | 38 +- da/testing.go | 165 +++++-- da/testing_test.go | 21 +- test/mocks/da.go | 94 ++-- 14 files changed, 675 insertions(+), 1279 deletions(-) delete mode 100644 block/internal/da/client.go delete mode 100644 block/internal/da/client_test.go diff --git a/block/internal/da/client.go b/block/internal/da/client.go deleted file mode 100644 index 89b5a3952d..0000000000 --- a/block/internal/da/client.go +++ /dev/null @@ -1,264 +0,0 @@ -// Package da provides a reusable wrapper around the core DA interface -// with common configuration for namespace handling and timeouts. -package da - -import ( - "context" - "errors" - "fmt" - "strings" - "time" - - "github.com/rs/zerolog" - - coreda "github.com/evstack/ev-node/da" -) - -// Client is the interface representing the DA client. -type Client interface { - Submit(ctx context.Context, data [][]byte, gasPrice float64, namespace []byte, options []byte) coreda.ResultSubmit - Retrieve(ctx context.Context, height uint64, namespace []byte) coreda.ResultRetrieve - RetrieveHeaders(ctx context.Context, height uint64) coreda.ResultRetrieve - RetrieveData(ctx context.Context, height uint64) coreda.ResultRetrieve - - GetHeaderNamespace() []byte - GetDataNamespace() []byte - GetDA() coreda.DA -} - -// client provides a reusable wrapper around the core DA interface -// with common configuration for namespace handling and timeouts. -type client struct { - da coreda.DA - logger zerolog.Logger - defaultTimeout time.Duration - namespaceBz []byte - namespaceDataBz []byte -} - -// Config contains configuration for the DA client. -type Config struct { - DA coreda.DA - Logger zerolog.Logger - DefaultTimeout time.Duration - Namespace string - DataNamespace string -} - -// NewClient creates a new DA client with pre-calculated namespace bytes. -func NewClient(cfg Config) *client { - if cfg.DefaultTimeout == 0 { - cfg.DefaultTimeout = 30 * time.Second - } - - return &client{ - da: cfg.DA, - logger: cfg.Logger.With().Str("component", "da_client").Logger(), - defaultTimeout: cfg.DefaultTimeout, - namespaceBz: coreda.NamespaceFromString(cfg.Namespace).Bytes(), - namespaceDataBz: coreda.NamespaceFromString(cfg.DataNamespace).Bytes(), - } -} - -// Submit submits blobs to the DA layer with the specified options. -func (c *client) Submit(ctx context.Context, data [][]byte, gasPrice float64, namespace []byte, options []byte) coreda.ResultSubmit { - ids, err := c.da.SubmitWithOptions(ctx, data, gasPrice, namespace, options) - - // calculate blob size - var blobSize uint64 - for _, blob := range data { - blobSize += uint64(len(blob)) - } - - // Handle errors returned by Submit - if err != nil { - if errors.Is(err, context.Canceled) { - c.logger.Debug().Msg("DA submission canceled due to context cancellation") - return coreda.ResultSubmit{ - BaseResult: coreda.BaseResult{ - Code: coreda.StatusContextCanceled, - Message: "submission canceled", - IDs: ids, - BlobSize: blobSize, - }, - } - } - status := coreda.StatusError - switch { - case errors.Is(err, coreda.ErrTxTimedOut): - status = coreda.StatusNotIncludedInBlock - case errors.Is(err, coreda.ErrTxAlreadyInMempool): - status = coreda.StatusAlreadyInMempool - case errors.Is(err, coreda.ErrTxIncorrectAccountSequence): - status = coreda.StatusIncorrectAccountSequence - case errors.Is(err, coreda.ErrBlobSizeOverLimit): - status = coreda.StatusTooBig - case errors.Is(err, coreda.ErrContextDeadline): - status = coreda.StatusContextDeadline - } - - // Use debug level for StatusTooBig as it gets handled later in submitToDA through recursive splitting - if status == coreda.StatusTooBig { - c.logger.Debug().Err(err).Uint64("status", uint64(status)).Msg("DA submission failed") - } else { - c.logger.Error().Err(err).Uint64("status", uint64(status)).Msg("DA submission failed") - } - return coreda.ResultSubmit{ - BaseResult: coreda.BaseResult{ - Code: status, - Message: "failed to submit blobs: " + err.Error(), - IDs: ids, - SubmittedCount: uint64(len(ids)), - Height: 0, - Timestamp: time.Now(), - BlobSize: blobSize, - }, - } - } - - if len(ids) == 0 && len(data) > 0 { - c.logger.Warn().Msg("DA submission returned no IDs for non-empty input data") - return coreda.ResultSubmit{ - BaseResult: coreda.BaseResult{ - Code: coreda.StatusError, - Message: "failed to submit blobs: no IDs returned despite non-empty input", - }, - } - } - - // Get height from the first ID - var height uint64 - if len(ids) > 0 { - height, _, err = coreda.SplitID(ids[0]) - if err != nil { - c.logger.Error().Err(err).Msg("failed to split ID") - } - } - - c.logger.Debug().Int("num_ids", len(ids)).Msg("DA submission successful") - return coreda.ResultSubmit{ - BaseResult: coreda.BaseResult{ - Code: coreda.StatusSuccess, - IDs: ids, - SubmittedCount: uint64(len(ids)), - Height: height, - BlobSize: blobSize, - Timestamp: time.Now(), - }, - } -} - -// Retrieve retrieves blobs from the DA layer at the specified height and namespace. -func (c *client) Retrieve(ctx context.Context, height uint64, namespace []byte) coreda.ResultRetrieve { - // 1. Get IDs - getIDsCtx, cancel := context.WithTimeout(ctx, c.defaultTimeout) - defer cancel() - idsResult, err := c.da.GetIDs(getIDsCtx, height, namespace) - if err != nil { - // Handle specific "not found" error - if strings.Contains(err.Error(), coreda.ErrBlobNotFound.Error()) { - c.logger.Debug().Uint64("height", height).Msg("Blobs not found at height") - return coreda.ResultRetrieve{ - BaseResult: coreda.BaseResult{ - Code: coreda.StatusNotFound, - Message: coreda.ErrBlobNotFound.Error(), - Height: height, - Timestamp: time.Now(), - }, - } - } - if strings.Contains(err.Error(), coreda.ErrHeightFromFuture.Error()) { - c.logger.Debug().Uint64("height", height).Msg("Blobs not found at height") - return coreda.ResultRetrieve{ - BaseResult: coreda.BaseResult{ - Code: coreda.StatusHeightFromFuture, - Message: coreda.ErrHeightFromFuture.Error(), - Height: height, - Timestamp: time.Now(), - }, - } - } - // Handle other errors during GetIDs - c.logger.Error().Uint64("height", height).Err(err).Msg("Failed to get IDs") - return coreda.ResultRetrieve{ - BaseResult: coreda.BaseResult{ - Code: coreda.StatusError, - Message: fmt.Sprintf("failed to get IDs: %s", err.Error()), - Height: height, - Timestamp: time.Now(), - }, - } - } - - // This check should technically be redundant if GetIDs correctly returns ErrBlobNotFound - if idsResult == nil || len(idsResult.IDs) == 0 { - c.logger.Debug().Uint64("height", height).Msg("No IDs found at height") - return coreda.ResultRetrieve{ - BaseResult: coreda.BaseResult{ - Code: coreda.StatusNotFound, - Message: coreda.ErrBlobNotFound.Error(), - Height: height, - Timestamp: time.Now(), - }, - } - } - // 2. Get Blobs using the retrieved IDs in batches - batchSize := 100 - blobs := make([][]byte, 0, len(idsResult.IDs)) - for i := 0; i < len(idsResult.IDs); i += batchSize { - end := min(i+batchSize, len(idsResult.IDs)) - - getBlobsCtx, cancel := context.WithTimeout(ctx, c.defaultTimeout) - batchBlobs, err := c.da.Get(getBlobsCtx, idsResult.IDs[i:end], namespace) - cancel() - if err != nil { - // Handle errors during Get - c.logger.Error().Uint64("height", height).Int("num_ids", len(idsResult.IDs)).Err(err).Msg("Failed to get blobs") - return coreda.ResultRetrieve{ - BaseResult: coreda.BaseResult{ - Code: coreda.StatusError, - Message: fmt.Sprintf("failed to get blobs for batch %d-%d: %s", i, end-1, err.Error()), - Height: height, - Timestamp: time.Now(), - }, - } - } - blobs = append(blobs, batchBlobs...) - } - // Success - c.logger.Debug().Uint64("height", height).Int("num_blobs", len(blobs)).Msg("Successfully retrieved blobs") - return coreda.ResultRetrieve{ - BaseResult: coreda.BaseResult{ - Code: coreda.StatusSuccess, - Height: height, - IDs: idsResult.IDs, - Timestamp: idsResult.Timestamp, - }, - Data: blobs, - } -} - -// RetrieveHeaders retrieves blobs from the header namespace at the specified height. -func (c *client) RetrieveHeaders(ctx context.Context, height uint64) coreda.ResultRetrieve { - return c.Retrieve(ctx, height, c.namespaceBz) -} - -// RetrieveData retrieves blobs from the data namespace at the specified height. -func (c *client) RetrieveData(ctx context.Context, height uint64) coreda.ResultRetrieve { - return c.Retrieve(ctx, height, c.namespaceDataBz) -} - -// GetHeaderNamespace returns the header namespace bytes. -func (c *client) GetHeaderNamespace() []byte { - return c.namespaceBz -} - -// GetDataNamespace returns the data namespace bytes. -func (c *client) GetDataNamespace() []byte { - return c.namespaceDataBz -} - -// GetDA returns the underlying DA interface for advanced usage. -func (c *client) GetDA() coreda.DA { - return c.da -} diff --git a/block/internal/da/client_test.go b/block/internal/da/client_test.go deleted file mode 100644 index d389bf68e7..0000000000 --- a/block/internal/da/client_test.go +++ /dev/null @@ -1,458 +0,0 @@ -package da - -import ( - "context" - "errors" - "testing" - "time" - - "github.com/rs/zerolog" - "gotest.tools/v3/assert" - - coreda "github.com/evstack/ev-node/da" -) - -// mockDA is a simple mock implementation of coreda.DA for testing -type mockDA struct { - submitFunc func(ctx context.Context, blobs []coreda.Blob, gasPrice float64, namespace []byte) ([]coreda.ID, error) - submitWithOptions func(ctx context.Context, blobs []coreda.Blob, gasPrice float64, namespace []byte, options []byte) ([]coreda.ID, error) - getIDsFunc func(ctx context.Context, height uint64, namespace []byte) (*coreda.GetIDsResult, error) - getFunc func(ctx context.Context, ids []coreda.ID, namespace []byte) ([]coreda.Blob, error) -} - -func (m *mockDA) Submit(ctx context.Context, blobs []coreda.Blob, gasPrice float64, namespace []byte) ([]coreda.ID, error) { - if m.submitFunc != nil { - return m.submitFunc(ctx, blobs, gasPrice, namespace) - } - return nil, nil -} - -func (m *mockDA) SubmitWithOptions(ctx context.Context, blobs []coreda.Blob, gasPrice float64, namespace []byte, options []byte) ([]coreda.ID, error) { - if m.submitWithOptions != nil { - return m.submitWithOptions(ctx, blobs, gasPrice, namespace, options) - } - return nil, nil -} - -func (m *mockDA) GetIDs(ctx context.Context, height uint64, namespace []byte) (*coreda.GetIDsResult, error) { - if m.getIDsFunc != nil { - return m.getIDsFunc(ctx, height, namespace) - } - return nil, errors.New("not implemented") -} - -func (m *mockDA) Get(ctx context.Context, ids []coreda.ID, namespace []byte) ([]coreda.Blob, error) { - if m.getFunc != nil { - return m.getFunc(ctx, ids, namespace) - } - return nil, errors.New("not implemented") -} - -func (m *mockDA) GetProofs(ctx context.Context, ids []coreda.ID, namespace []byte) ([]coreda.Proof, error) { - return nil, errors.New("not implemented") -} - -func (m *mockDA) Commit(ctx context.Context, blobs []coreda.Blob, namespace []byte) ([]coreda.Commitment, error) { - return nil, errors.New("not implemented") -} - -func (m *mockDA) Validate(ctx context.Context, ids []coreda.ID, proofs []coreda.Proof, namespace []byte) ([]bool, error) { - return nil, errors.New("not implemented") -} - -func TestNewClient(t *testing.T) { - tests := []struct { - name string - cfg Config - }{ - { - name: "with all namespaces", - cfg: Config{ - DA: &mockDA{}, - Logger: zerolog.Nop(), - DefaultTimeout: 5 * time.Second, - Namespace: "test-ns", - DataNamespace: "test-data-ns", - }, - }, - { - name: "without forced inclusion namespace", - cfg: Config{ - DA: &mockDA{}, - Logger: zerolog.Nop(), - DefaultTimeout: 5 * time.Second, - Namespace: "test-ns", - DataNamespace: "test-data-ns", - }, - }, - { - name: "with default timeout", - cfg: Config{ - DA: &mockDA{}, - Logger: zerolog.Nop(), - Namespace: "test-ns", - DataNamespace: "test-data-ns", - }, - }, - } - - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - client := NewClient(tt.cfg) - assert.Assert(t, client != nil) - assert.Assert(t, client.da != nil) - assert.Assert(t, len(client.namespaceBz) > 0) - assert.Assert(t, len(client.namespaceDataBz) > 0) - - expectedTimeout := tt.cfg.DefaultTimeout - if expectedTimeout == 0 { - expectedTimeout = 30 * time.Second - } - assert.Equal(t, client.defaultTimeout, expectedTimeout) - }) - } -} - -func TestClient_GetNamespaces(t *testing.T) { - cfg := Config{ - DA: &mockDA{}, - Logger: zerolog.Nop(), - Namespace: "test-header", - DataNamespace: "test-data", - } - - client := NewClient(cfg) - - headerNs := client.GetHeaderNamespace() - assert.Assert(t, len(headerNs) > 0) - - dataNs := client.GetDataNamespace() - assert.Assert(t, len(dataNs) > 0) - - // Namespaces should be different - assert.Assert(t, string(headerNs) != string(dataNs)) -} - -func TestClient_GetDA(t *testing.T) { - mockDAInstance := &mockDA{} - cfg := Config{ - DA: mockDAInstance, - Logger: zerolog.Nop(), - Namespace: "test-ns", - DataNamespace: "test-data-ns", - } - - client := NewClient(cfg) - da := client.GetDA() - assert.Equal(t, da, mockDAInstance) -} - -func TestClient_Submit(t *testing.T) { - logger := zerolog.Nop() - - testCases := []struct { - name string - data [][]byte - gasPrice float64 - options []byte - submitErr error - submitIDs [][]byte - expectedCode coreda.StatusCode - expectedErrMsg string - expectedIDs [][]byte - expectedCount uint64 - }{ - { - name: "successful submission", - data: [][]byte{[]byte("blob1"), []byte("blob2")}, - gasPrice: 1.0, - options: []byte("opts"), - submitIDs: [][]byte{[]byte("id1"), []byte("id2")}, - expectedCode: coreda.StatusSuccess, - expectedIDs: [][]byte{[]byte("id1"), []byte("id2")}, - expectedCount: 2, - }, - { - name: "context canceled error", - data: [][]byte{[]byte("blob1")}, - gasPrice: 1.0, - options: []byte("opts"), - submitErr: context.Canceled, - expectedCode: coreda.StatusContextCanceled, - expectedErrMsg: "submission canceled", - }, - { - name: "tx timed out error", - data: [][]byte{[]byte("blob1")}, - gasPrice: 1.0, - options: []byte("opts"), - submitErr: coreda.ErrTxTimedOut, - expectedCode: coreda.StatusNotIncludedInBlock, - expectedErrMsg: "failed to submit blobs: " + coreda.ErrTxTimedOut.Error(), - }, - { - name: "tx already in mempool error", - data: [][]byte{[]byte("blob1")}, - gasPrice: 1.0, - options: []byte("opts"), - submitErr: coreda.ErrTxAlreadyInMempool, - expectedCode: coreda.StatusAlreadyInMempool, - expectedErrMsg: "failed to submit blobs: " + coreda.ErrTxAlreadyInMempool.Error(), - }, - { - name: "incorrect account sequence error", - data: [][]byte{[]byte("blob1")}, - gasPrice: 1.0, - options: []byte("opts"), - submitErr: coreda.ErrTxIncorrectAccountSequence, - expectedCode: coreda.StatusIncorrectAccountSequence, - expectedErrMsg: "failed to submit blobs: " + coreda.ErrTxIncorrectAccountSequence.Error(), - }, - { - name: "blob size over limit error", - data: [][]byte{[]byte("blob1")}, - gasPrice: 1.0, - options: []byte("opts"), - submitErr: coreda.ErrBlobSizeOverLimit, - expectedCode: coreda.StatusTooBig, - expectedErrMsg: "failed to submit blobs: " + coreda.ErrBlobSizeOverLimit.Error(), - }, - { - name: "context deadline error", - data: [][]byte{[]byte("blob1")}, - gasPrice: 1.0, - options: []byte("opts"), - submitErr: coreda.ErrContextDeadline, - expectedCode: coreda.StatusContextDeadline, - expectedErrMsg: "failed to submit blobs: " + coreda.ErrContextDeadline.Error(), - }, - { - name: "generic submission error", - data: [][]byte{[]byte("blob1")}, - gasPrice: 1.0, - options: []byte("opts"), - submitErr: errors.New("some generic error"), - expectedCode: coreda.StatusError, - expectedErrMsg: "failed to submit blobs: some generic error", - }, - { - name: "no IDs returned for non-empty data", - data: [][]byte{[]byte("blob1")}, - gasPrice: 1.0, - options: []byte("opts"), - submitIDs: [][]byte{}, - expectedCode: coreda.StatusError, - expectedErrMsg: "failed to submit blobs: no IDs returned despite non-empty input", - }, - } - - for _, tc := range testCases { - t.Run(tc.name, func(t *testing.T) { - mockDAInstance := &mockDA{ - submitWithOptions: func(ctx context.Context, blobs []coreda.Blob, gasPrice float64, namespace []byte, options []byte) ([]coreda.ID, error) { - return tc.submitIDs, tc.submitErr - }, - } - - client := NewClient(Config{ - DA: mockDAInstance, - Logger: logger, - Namespace: "test-namespace", - DataNamespace: "test-data-namespace", - }) - - encodedNamespace := coreda.NamespaceFromString("test-namespace") - result := client.Submit(context.Background(), tc.data, tc.gasPrice, encodedNamespace.Bytes(), tc.options) - - assert.Equal(t, tc.expectedCode, result.Code) - if tc.expectedErrMsg != "" { - assert.Assert(t, result.Message != "") - } - if tc.expectedIDs != nil { - assert.Equal(t, len(tc.expectedIDs), len(result.IDs)) - } - if tc.expectedCount != 0 { - assert.Equal(t, tc.expectedCount, result.SubmittedCount) - } - }) - } -} - -func TestClient_Retrieve(t *testing.T) { - logger := zerolog.Nop() - dataLayerHeight := uint64(100) - mockIDs := [][]byte{[]byte("id1"), []byte("id2")} - mockBlobs := [][]byte{[]byte("blobA"), []byte("blobB")} - mockTimestamp := time.Now() - - testCases := []struct { - name string - getIDsResult *coreda.GetIDsResult - getIDsErr error - getBlobsErr error - expectedCode coreda.StatusCode - expectedErrMsg string - expectedIDs [][]byte - expectedData [][]byte - expectedHeight uint64 - }{ - { - name: "successful retrieval", - getIDsResult: &coreda.GetIDsResult{ - IDs: mockIDs, - Timestamp: mockTimestamp, - }, - expectedCode: coreda.StatusSuccess, - expectedIDs: mockIDs, - expectedData: mockBlobs, - expectedHeight: dataLayerHeight, - }, - { - name: "blob not found error during GetIDs", - getIDsErr: coreda.ErrBlobNotFound, - expectedCode: coreda.StatusNotFound, - expectedErrMsg: coreda.ErrBlobNotFound.Error(), - expectedHeight: dataLayerHeight, - }, - { - name: "height from future error during GetIDs", - getIDsErr: coreda.ErrHeightFromFuture, - expectedCode: coreda.StatusHeightFromFuture, - expectedErrMsg: coreda.ErrHeightFromFuture.Error(), - expectedHeight: dataLayerHeight, - }, - { - name: "generic error during GetIDs", - getIDsErr: errors.New("failed to connect to DA"), - expectedCode: coreda.StatusError, - expectedErrMsg: "failed to get IDs: failed to connect to DA", - expectedHeight: dataLayerHeight, - }, - { - name: "GetIDs returns nil result", - getIDsResult: nil, - expectedCode: coreda.StatusNotFound, - expectedErrMsg: coreda.ErrBlobNotFound.Error(), - expectedHeight: dataLayerHeight, - }, - { - name: "GetIDs returns empty IDs", - getIDsResult: &coreda.GetIDsResult{ - IDs: [][]byte{}, - Timestamp: mockTimestamp, - }, - expectedCode: coreda.StatusNotFound, - expectedErrMsg: coreda.ErrBlobNotFound.Error(), - expectedHeight: dataLayerHeight, - }, - { - name: "error during Get (blobs retrieval)", - getIDsResult: &coreda.GetIDsResult{ - IDs: mockIDs, - Timestamp: mockTimestamp, - }, - getBlobsErr: errors.New("network error during blob retrieval"), - expectedCode: coreda.StatusError, - expectedErrMsg: "failed to get blobs for batch 0-1: network error during blob retrieval", - expectedHeight: dataLayerHeight, - }, - } - - for _, tc := range testCases { - t.Run(tc.name, func(t *testing.T) { - mockDAInstance := &mockDA{ - getIDsFunc: func(ctx context.Context, height uint64, namespace []byte) (*coreda.GetIDsResult, error) { - return tc.getIDsResult, tc.getIDsErr - }, - getFunc: func(ctx context.Context, ids []coreda.ID, namespace []byte) ([]coreda.Blob, error) { - if tc.getBlobsErr != nil { - return nil, tc.getBlobsErr - } - return mockBlobs, nil - }, - } - - client := NewClient(Config{ - DA: mockDAInstance, - Logger: logger, - Namespace: "test-namespace", - DataNamespace: "test-data-namespace", - DefaultTimeout: 5 * time.Second, - }) - - encodedNamespace := coreda.NamespaceFromString("test-namespace") - result := client.Retrieve(context.Background(), dataLayerHeight, encodedNamespace.Bytes()) - - assert.Equal(t, tc.expectedCode, result.Code) - assert.Equal(t, tc.expectedHeight, result.Height) - if tc.expectedErrMsg != "" { - assert.Assert(t, result.Message != "") - } - if tc.expectedIDs != nil { - assert.Equal(t, len(tc.expectedIDs), len(result.IDs)) - } - if tc.expectedData != nil { - assert.Equal(t, len(tc.expectedData), len(result.Data)) - } - }) - } -} - -func TestClient_Retrieve_Timeout(t *testing.T) { - logger := zerolog.Nop() - dataLayerHeight := uint64(100) - encodedNamespace := coreda.NamespaceFromString("test-namespace") - - t.Run("timeout during GetIDs", func(t *testing.T) { - mockDAInstance := &mockDA{ - getIDsFunc: func(ctx context.Context, height uint64, namespace []byte) (*coreda.GetIDsResult, error) { - <-ctx.Done() // Wait for context cancellation - return nil, context.DeadlineExceeded - }, - } - - client := NewClient(Config{ - DA: mockDAInstance, - Logger: logger, - Namespace: "test-namespace", - DataNamespace: "test-data-namespace", - DefaultTimeout: 1 * time.Millisecond, - }) - - result := client.Retrieve(context.Background(), dataLayerHeight, encodedNamespace.Bytes()) - - assert.Equal(t, coreda.StatusError, result.Code) - assert.Assert(t, result.Message != "") - }) - - t.Run("timeout during Get", func(t *testing.T) { - mockIDs := [][]byte{[]byte("id1")} - mockTimestamp := time.Now() - - mockDAInstance := &mockDA{ - getIDsFunc: func(ctx context.Context, height uint64, namespace []byte) (*coreda.GetIDsResult, error) { - return &coreda.GetIDsResult{ - IDs: mockIDs, - Timestamp: mockTimestamp, - }, nil - }, - getFunc: func(ctx context.Context, ids []coreda.ID, namespace []byte) ([]coreda.Blob, error) { - <-ctx.Done() // Wait for context cancellation - return nil, context.DeadlineExceeded - }, - } - - client := NewClient(Config{ - DA: mockDAInstance, - Logger: logger, - Namespace: "test-namespace", - DataNamespace: "test-data-namespace", - DefaultTimeout: 1 * time.Millisecond, - }) - - result := client.Retrieve(context.Background(), dataLayerHeight, encodedNamespace.Bytes()) - - assert.Equal(t, coreda.StatusError, result.Code) - assert.Assert(t, result.Message != "") - }) -} diff --git a/block/internal/submitting/da_submitter.go b/block/internal/submitting/da_submitter.go index a65fde4992..29811af4db 100644 --- a/block/internal/submitting/da_submitter.go +++ b/block/internal/submitting/da_submitter.go @@ -4,7 +4,6 @@ import ( "bytes" "context" "encoding/json" - "errors" "fmt" "time" @@ -27,103 +26,6 @@ const ( initialBackoff = 100 * time.Millisecond ) -// submitWithHelpers performs blob submission using the underlying DA layer, -// handling error mapping to produce a ResultSubmit. -func submitWithHelpers( - ctx context.Context, - daLayer dapkg.DA, - logger zerolog.Logger, - data [][]byte, - gasPrice float64, - namespace []byte, - options []byte, -) dapkg.ResultSubmit { - ids, err := daLayer.SubmitWithOptions(ctx, data, gasPrice, namespace, options) - - // calculate blob size - var blobSize uint64 - for _, blob := range data { - blobSize += uint64(len(blob)) - } - - // Handle errors returned by Submit - if err != nil { - if errors.Is(err, context.Canceled) { - logger.Debug().Msg("DA submission canceled via helper due to context cancellation") - return dapkg.ResultSubmit{ - BaseResult: dapkg.BaseResult{ - Code: dapkg.StatusContextCanceled, - Message: "submission canceled", - IDs: ids, - BlobSize: blobSize, - }, - } - } - status := dapkg.StatusError - switch { - case errors.Is(err, dapkg.ErrTxTimedOut): - status = dapkg.StatusNotIncludedInBlock - case errors.Is(err, dapkg.ErrTxAlreadyInMempool): - status = dapkg.StatusAlreadyInMempool - case errors.Is(err, dapkg.ErrTxIncorrectAccountSequence): - status = dapkg.StatusIncorrectAccountSequence - case errors.Is(err, dapkg.ErrBlobSizeOverLimit): - status = dapkg.StatusTooBig - case errors.Is(err, dapkg.ErrContextDeadline): - status = dapkg.StatusContextDeadline - } - - // Use debug level for StatusTooBig as it gets handled later in submitToDA through recursive splitting - if status == dapkg.StatusTooBig { - logger.Debug().Err(err).Uint64("status", uint64(status)).Msg("DA submission failed via helper") - } else { - logger.Error().Err(err).Uint64("status", uint64(status)).Msg("DA submission failed via helper") - } - return dapkg.ResultSubmit{ - BaseResult: dapkg.BaseResult{ - Code: status, - Message: "failed to submit blobs: " + err.Error(), - IDs: ids, - SubmittedCount: uint64(len(ids)), - Height: 0, - Timestamp: time.Now(), - BlobSize: blobSize, - }, - } - } - - if len(ids) == 0 && len(data) > 0 { - logger.Warn().Msg("DA submission via helper returned no IDs for non-empty input data") - return dapkg.ResultSubmit{ - BaseResult: dapkg.BaseResult{ - Code: dapkg.StatusError, - Message: "failed to submit blobs: no IDs returned despite non-empty input", - }, - } - } - - // Get height from the first ID - var height uint64 - if len(ids) > 0 { - height, _, err = dapkg.SplitID(ids[0]) - if err != nil { - logger.Error().Err(err).Msg("failed to split ID") - } - } - - logger.Debug().Int("num_ids", len(ids)).Msg("DA submission successful via helper") - return dapkg.ResultSubmit{ - BaseResult: dapkg.BaseResult{ - Code: dapkg.StatusSuccess, - IDs: ids, - SubmittedCount: uint64(len(ids)), - Height: height, - BlobSize: blobSize, - Timestamp: time.Now(), - }, - } -} - // retryPolicy defines clamped bounds for retries and backoff. type retryPolicy struct { MaxAttempts int @@ -509,8 +411,8 @@ func submitToDA[T any]( // Perform submission start := time.Now() - res := submitWithHelpers(submitCtx, s.da, s.logger, marshaled, -1, namespace, mergedOptions) - s.logger.Debug().Int("attempts", rs.Attempt).Dur("elapsed", time.Since(start)).Uint64("code", uint64(res.Code)).Msg("got SubmitWithHelpers response from celestia") + res := s.da.SubmitWithOptions(submitCtx, marshaled, -1, namespace, mergedOptions) + s.logger.Debug().Int("attempts", rs.Attempt).Dur("elapsed", time.Since(start)).Uint64("code", uint64(res.Code)).Msg("got SubmitWithOptions response from Celestia layer") // Record submission result for observability if daVisualizationServer := server.GetDAVisualizationServer(); daVisualizationServer != nil { diff --git a/block/internal/submitting/da_submitter_mocks_test.go b/block/internal/submitting/da_submitter_mocks_test.go index 7efce331d5..0cbc0ae2fd 100644 --- a/block/internal/submitting/da_submitter_mocks_test.go +++ b/block/internal/submitting/da_submitter_mocks_test.go @@ -2,7 +2,6 @@ package submitting import ( "context" - "errors" "testing" "time" @@ -34,6 +33,27 @@ func newTestSubmitter(mockDA *mocks.MockDA, override func(*config.Config)) *DASu // marshal helper for simple items func marshalString(s string) ([]byte, error) { return []byte(s), nil } +// helper to create a ResultSubmit for errors +func errorResult(code da.StatusCode, msg string) da.ResultSubmit { + return da.ResultSubmit{ + BaseResult: da.BaseResult{ + Code: code, + Message: msg, + }, + } +} + +// helper to create a ResultSubmit for success +func successResult(ids []da.ID) da.ResultSubmit { + return da.ResultSubmit{ + BaseResult: da.BaseResult{ + Code: da.StatusSuccess, + IDs: ids, + SubmittedCount: uint64(len(ids)), + }, + } +} + func TestSubmitToDA_MempoolRetry_IncreasesGasAndSucceeds(t *testing.T) { t.Parallel() @@ -42,21 +62,24 @@ func TestSubmitToDA_MempoolRetry_IncreasesGasAndSucceeds(t *testing.T) { nsBz := da.NamespaceFromString("ns").Bytes() opts := []byte("opts") var usedGas []float64 + + // First attempt: timeout error (mapped to StatusNotIncludedInBlock) mockDA. On("SubmitWithOptions", mock.Anything, mock.Anything, mock.AnythingOfType("float64"), nsBz, opts). Run(func(args mock.Arguments) { usedGas = append(usedGas, args.Get(2).(float64)) }). - Return(nil, da.ErrTxTimedOut). + Return(errorResult(da.StatusNotIncludedInBlock, "timeout")). Once() - ids := [][]byte{[]byte("id1"), []byte("id2"), []byte("id3")} + // Second attempt: success + ids := []da.ID{[]byte("id1"), []byte("id2"), []byte("id3")} mockDA. On("SubmitWithOptions", mock.Anything, mock.Anything, mock.AnythingOfType("float64"), nsBz, opts). Run(func(args mock.Arguments) { usedGas = append(usedGas, args.Get(2).(float64)) }). - Return(ids, nil). + Return(successResult(ids)). Once() s := newTestSubmitter(mockDA, nil) @@ -95,15 +118,15 @@ func TestSubmitToDA_UnknownError_RetriesSameGasThenSucceeds(t *testing.T) { mockDA. On("SubmitWithOptions", mock.Anything, mock.Anything, mock.AnythingOfType("float64"), nsBz, opts). Run(func(args mock.Arguments) { usedGas = append(usedGas, args.Get(2).(float64)) }). - Return(nil, errors.New("boom")). + Return(errorResult(da.StatusError, "boom")). Once() // Second attempt: same gas, success - ids := [][]byte{[]byte("id1")} + ids := []da.ID{[]byte("id1")} mockDA. On("SubmitWithOptions", mock.Anything, mock.Anything, mock.AnythingOfType("float64"), nsBz, opts). Run(func(args mock.Arguments) { usedGas = append(usedGas, args.Get(2).(float64)) }). - Return(ids, nil). + Return(successResult(ids)). Once() s := newTestSubmitter(mockDA, nil) @@ -144,18 +167,18 @@ func TestSubmitToDA_TooBig_HalvesBatch(t *testing.T) { blobs := args.Get(1).([][]byte) batchSizes = append(batchSizes, len(blobs)) }). - Return(nil, da.ErrBlobSizeOverLimit). + Return(errorResult(da.StatusTooBig, "blob too big")). Once() // Second attempt: expect half the size, succeed - ids := [][]byte{[]byte("id1"), []byte("id2")} + ids := []da.ID{[]byte("id1"), []byte("id2")} mockDA. On("SubmitWithOptions", mock.Anything, mock.Anything, mock.Anything, nsBz, opts). Run(func(args mock.Arguments) { blobs := args.Get(1).([][]byte) batchSizes = append(batchSizes, len(blobs)) }). - Return(ids, nil). + Return(successResult(ids)). Once() s := newTestSubmitter(mockDA, nil) @@ -192,15 +215,15 @@ func TestSubmitToDA_SentinelNoGas_PreservesGasAcrossRetries(t *testing.T) { mockDA. On("SubmitWithOptions", mock.Anything, mock.Anything, mock.AnythingOfType("float64"), nsBz, opts). Run(func(args mock.Arguments) { usedGas = append(usedGas, args.Get(2).(float64)) }). - Return(nil, da.ErrTxAlreadyInMempool). + Return(errorResult(da.StatusAlreadyInMempool, "already in mempool")). Once() // Second attempt: should use same sentinel gas (-1), succeed - ids := [][]byte{[]byte("id1")} + ids := []da.ID{[]byte("id1")} mockDA. On("SubmitWithOptions", mock.Anything, mock.Anything, mock.AnythingOfType("float64"), nsBz, opts). Run(func(args mock.Arguments) { usedGas = append(usedGas, args.Get(2).(float64)) }). - Return(ids, nil). + Return(successResult(ids)). Once() s := newTestSubmitter(mockDA, nil) @@ -235,12 +258,12 @@ func TestSubmitToDA_PartialSuccess_AdvancesWindow(t *testing.T) { var totalSubmitted int // First attempt: success for first 2 of 3 - firstIDs := [][]byte{[]byte("id1"), []byte("id2")} - mockDA.On("SubmitWithOptions", mock.Anything, mock.Anything, mock.Anything, nsBz, opts).Return(firstIDs, nil).Once() + firstIDs := []da.ID{[]byte("id1"), []byte("id2")} + mockDA.On("SubmitWithOptions", mock.Anything, mock.Anything, mock.Anything, nsBz, opts).Return(successResult(firstIDs)).Once() // Second attempt: success for remaining 1 - secondIDs := [][]byte{[]byte("id3")} - mockDA.On("SubmitWithOptions", mock.Anything, mock.Anything, mock.Anything, nsBz, opts).Return(secondIDs, nil).Once() + secondIDs := []da.ID{[]byte("id3")} + mockDA.On("SubmitWithOptions", mock.Anything, mock.Anything, mock.Anything, nsBz, opts).Return(successResult(secondIDs)).Once() s := newTestSubmitter(mockDA, nil) diff --git a/block/internal/syncing/da_retriever.go b/block/internal/syncing/da_retriever.go index ae169e0fa3..274d1ad33b 100644 --- a/block/internal/syncing/da_retriever.go +++ b/block/internal/syncing/da_retriever.go @@ -5,8 +5,6 @@ import ( "context" "errors" "fmt" - "strings" - "time" "github.com/rs/zerolog" "google.golang.org/protobuf/proto" @@ -20,108 +18,6 @@ import ( pb "github.com/evstack/ev-node/types/pb/evnode/v1" ) -// defaultDATimeout is the default timeout for DA retrieval operations -const defaultDATimeout = 10 * time.Second - -// retrieveWithHelpers performs blob retrieval using the underlying DA layer, -// handling error mapping to produce a ResultRetrieve. -func retrieveWithHelpers( - ctx context.Context, - daLayer dapkg.DA, - logger zerolog.Logger, - dataLayerHeight uint64, - namespace []byte, - requestTimeout time.Duration, -) dapkg.ResultRetrieve { - // 1. Get IDs - getIDsCtx, cancel := context.WithTimeout(ctx, requestTimeout) - defer cancel() - idsResult, err := daLayer.GetIDs(getIDsCtx, dataLayerHeight, namespace) - if err != nil { - // Handle specific "not found" error - if strings.Contains(err.Error(), dapkg.ErrBlobNotFound.Error()) { - logger.Debug().Uint64("height", dataLayerHeight).Msg("Retrieve helper: Blobs not found at height") - return dapkg.ResultRetrieve{ - BaseResult: dapkg.BaseResult{ - Code: dapkg.StatusNotFound, - Message: dapkg.ErrBlobNotFound.Error(), - Height: dataLayerHeight, - Timestamp: time.Now(), - }, - } - } - if strings.Contains(err.Error(), dapkg.ErrHeightFromFuture.Error()) { - logger.Debug().Uint64("height", dataLayerHeight).Msg("Retrieve helper: Blobs not found at height") - return dapkg.ResultRetrieve{ - BaseResult: dapkg.BaseResult{ - Code: dapkg.StatusHeightFromFuture, - Message: dapkg.ErrHeightFromFuture.Error(), - Height: dataLayerHeight, - Timestamp: time.Now(), - }, - } - } - // Handle other errors during GetIDs - logger.Error().Uint64("height", dataLayerHeight).Err(err).Msg("Retrieve helper: Failed to get IDs") - return dapkg.ResultRetrieve{ - BaseResult: dapkg.BaseResult{ - Code: dapkg.StatusError, - Message: fmt.Sprintf("failed to get IDs: %s", err.Error()), - Height: dataLayerHeight, - Timestamp: time.Now(), - }, - } - } - - // This check should technically be redundant if GetIDs correctly returns ErrBlobNotFound - if idsResult == nil || len(idsResult.IDs) == 0 { - logger.Debug().Uint64("height", dataLayerHeight).Msg("Retrieve helper: No IDs found at height") - return dapkg.ResultRetrieve{ - BaseResult: dapkg.BaseResult{ - Code: dapkg.StatusNotFound, - Message: dapkg.ErrBlobNotFound.Error(), - Height: dataLayerHeight, - Timestamp: time.Now(), - }, - } - } - // 2. Get Blobs using the retrieved IDs in batches - batchSize := 100 - blobs := make([][]byte, 0, len(idsResult.IDs)) - for i := 0; i < len(idsResult.IDs); i += batchSize { - end := min(i+batchSize, len(idsResult.IDs)) - - getBlobsCtx, cancel := context.WithTimeout(ctx, requestTimeout) - batchBlobs, err := daLayer.Get(getBlobsCtx, idsResult.IDs[i:end], namespace) - cancel() - if err != nil { - // Handle errors during Get - logger.Error().Uint64("height", dataLayerHeight).Int("num_ids", len(idsResult.IDs)).Err(err).Msg("Retrieve helper: Failed to get blobs") - return dapkg.ResultRetrieve{ - BaseResult: dapkg.BaseResult{ - Code: dapkg.StatusError, - Message: fmt.Sprintf("failed to get blobs for batch %d-%d: %s", i, end-1, err.Error()), - Height: dataLayerHeight, - Timestamp: time.Now(), - }, - } - } - blobs = append(blobs, batchBlobs...) - } - - // Success - logger.Debug().Uint64("height", dataLayerHeight).Int("num_blobs", len(blobs)).Msg("Successfully retrieved blobs") - return dapkg.ResultRetrieve{ - BaseResult: dapkg.BaseResult{ - Code: dapkg.StatusSuccess, - Height: dataLayerHeight, - IDs: idsResult.IDs, - Timestamp: idsResult.Timestamp, - }, - Data: blobs, - } -} - // DARetriever handles DA retrieval operations for syncing type DARetriever struct { da dapkg.DA @@ -179,14 +75,14 @@ func (r *DARetriever) RetrieveFromDA(ctx context.Context, daHeight uint64) ([]co // fetchBlobs retrieves blobs from the DA layer func (r *DARetriever) fetchBlobs(ctx context.Context, daHeight uint64) (dapkg.ResultRetrieve, error) { // Retrieve from both namespaces - headerRes := retrieveWithHelpers(ctx, r.da, r.logger, daHeight, r.namespaceBz, defaultDATimeout) + headerRes := r.da.Retrieve(ctx, daHeight, r.namespaceBz) // If namespaces are the same, return header result if bytes.Equal(r.namespaceBz, r.namespaceDataBz) { return headerRes, r.validateBlobResponse(headerRes, daHeight) } - dataRes := retrieveWithHelpers(ctx, r.da, r.logger, daHeight, r.namespaceDataBz, defaultDATimeout) + dataRes := r.da.Retrieve(ctx, daHeight, r.namespaceDataBz) // Validate responses headerErr := r.validateBlobResponse(headerRes, daHeight) diff --git a/block/internal/syncing/da_retriever_test.go b/block/internal/syncing/da_retriever_test.go index ee514255ea..4e9afed466 100644 --- a/block/internal/syncing/da_retriever_test.go +++ b/block/internal/syncing/da_retriever_test.go @@ -4,7 +4,6 @@ import ( "bytes" "context" "errors" - "fmt" "testing" "time" @@ -56,8 +55,13 @@ func TestDARetriever_RetrieveFromDA_Invalid(t *testing.T) { mockDA := testmocks.NewMockDA(t) - mockDA.EXPECT().GetIDs(mock.Anything, mock.Anything, mock.Anything). - Return(nil, errors.New("just invalid")).Maybe() + mockDA.EXPECT().Retrieve(mock.Anything, mock.Anything, mock.Anything). + Return(da.ResultRetrieve{ + BaseResult: da.BaseResult{ + Code: da.StatusError, + Message: "just invalid", + }, + }).Maybe() r := NewDARetriever(mockDA, cm, config.DefaultConfig(), genesis.Genesis{}, zerolog.Nop()) events, err := r.RetrieveFromDA(context.Background(), 42) @@ -73,9 +77,14 @@ func TestDARetriever_RetrieveFromDA_NotFound(t *testing.T) { mockDA := testmocks.NewMockDA(t) - // GetIDs returns ErrBlobNotFound -> helper maps to StatusNotFound - mockDA.EXPECT().GetIDs(mock.Anything, mock.Anything, mock.Anything). - Return(nil, fmt.Errorf("%s: whatever", da.ErrBlobNotFound.Error())).Maybe() + // Retrieve returns StatusNotFound + mockDA.EXPECT().Retrieve(mock.Anything, mock.Anything, mock.Anything). + Return(da.ResultRetrieve{ + BaseResult: da.BaseResult{ + Code: da.StatusNotFound, + Message: da.ErrBlobNotFound.Error(), + }, + }).Maybe() r := NewDARetriever(mockDA, cm, config.DefaultConfig(), genesis.Genesis{}, zerolog.Nop()) events, err := r.RetrieveFromDA(context.Background(), 42) @@ -90,9 +99,14 @@ func TestDARetriever_RetrieveFromDA_HeightFromFuture(t *testing.T) { require.NoError(t, err) mockDA := testmocks.NewMockDA(t) - // GetIDs returns ErrHeightFromFuture -> helper maps to StatusHeightFromFuture, fetchBlobs returns error - mockDA.EXPECT().GetIDs(mock.Anything, mock.Anything, mock.Anything). - Return(nil, fmt.Errorf("%s: later", da.ErrHeightFromFuture.Error())).Maybe() + // Retrieve returns StatusHeightFromFuture + mockDA.EXPECT().Retrieve(mock.Anything, mock.Anything, mock.Anything). + Return(da.ResultRetrieve{ + BaseResult: da.BaseResult{ + Code: da.StatusHeightFromFuture, + Message: da.ErrHeightFromFuture.Error(), + }, + }).Maybe() r := NewDARetriever(mockDA, cm, config.DefaultConfig(), genesis.Genesis{}, zerolog.Nop()) events, derr := r.RetrieveFromDA(context.Background(), 1000) @@ -109,28 +123,24 @@ func TestDARetriever_RetrieveFromDA_Timeout(t *testing.T) { mockDA := testmocks.NewMockDA(t) - // Mock GetIDs to hang longer than the timeout - mockDA.EXPECT().GetIDs(mock.Anything, mock.Anything, mock.Anything). - Run(func(ctx context.Context, height uint64, namespace []byte) { - <-ctx.Done() - }). - Return(nil, context.DeadlineExceeded).Maybe() + // Mock Retrieve to return timeout error + mockDA.EXPECT().Retrieve(mock.Anything, mock.Anything, mock.Anything). + Return(da.ResultRetrieve{ + BaseResult: da.BaseResult{ + Code: da.StatusError, + Message: "failed to get IDs: context deadline exceeded", + }, + }).Maybe() r := NewDARetriever(mockDA, cm, config.DefaultConfig(), genesis.Genesis{}, zerolog.Nop()) - start := time.Now() events, err := r.RetrieveFromDA(context.Background(), 42) - duration := time.Since(start) // Verify error is returned and contains deadline exceeded information require.Error(t, err) assert.Contains(t, err.Error(), "DA retrieval failed") assert.Contains(t, err.Error(), "context deadline exceeded") assert.Len(t, events, 0) - - // Verify timeout occurred approximately at expected time (with some tolerance) - assert.Greater(t, duration, 9*time.Second, "should timeout after approximately 10 seconds") - assert.Less(t, duration, 12*time.Second, "should not take much longer than timeout") } func TestDARetriever_RetrieveFromDA_TimeoutFast(t *testing.T) { @@ -141,9 +151,14 @@ func TestDARetriever_RetrieveFromDA_TimeoutFast(t *testing.T) { mockDA := testmocks.NewMockDA(t) - // Mock GetIDs to immediately return context deadline exceeded - mockDA.EXPECT().GetIDs(mock.Anything, mock.Anything, mock.Anything). - Return(nil, context.DeadlineExceeded).Maybe() + // Mock Retrieve to immediately return context deadline exceeded + mockDA.EXPECT().Retrieve(mock.Anything, mock.Anything, mock.Anything). + Return(da.ResultRetrieve{ + BaseResult: da.BaseResult{ + Code: da.StatusError, + Message: "failed to get IDs: context deadline exceeded", + }, + }).Maybe() r := NewDARetriever(mockDA, cm, config.DefaultConfig(), genesis.Genesis{}, zerolog.Nop()) @@ -289,16 +304,26 @@ func TestDARetriever_RetrieveFromDA_TwoNamespaces_Success(t *testing.T) { namespaceDataBz := da.NamespaceFromString(cfg.DA.GetDataNamespace()).Bytes() mockDA := testmocks.NewMockDA(t) - // Expect GetIDs for both namespaces - mockDA.EXPECT().GetIDs(mock.Anything, uint64(1234), mock.MatchedBy(func(ns []byte) bool { return bytes.Equal(ns, namespaceBz) })). - Return(&da.GetIDsResult{IDs: [][]byte{[]byte("h1")}, Timestamp: time.Now()}, nil).Once() - mockDA.EXPECT().Get(mock.Anything, mock.Anything, mock.MatchedBy(func(ns []byte) bool { return bytes.Equal(ns, namespaceBz) })). - Return([][]byte{hdrBin}, nil).Once() - - mockDA.EXPECT().GetIDs(mock.Anything, uint64(1234), mock.MatchedBy(func(ns []byte) bool { return bytes.Equal(ns, namespaceDataBz) })). - Return(&da.GetIDsResult{IDs: [][]byte{[]byte("d1")}, Timestamp: time.Now()}, nil).Once() - mockDA.EXPECT().Get(mock.Anything, mock.Anything, mock.MatchedBy(func(ns []byte) bool { return bytes.Equal(ns, namespaceDataBz) })). - Return([][]byte{dataBin}, nil).Once() + // Expect Retrieve for both namespaces + mockDA.EXPECT().Retrieve(mock.Anything, uint64(1234), mock.MatchedBy(func(ns []byte) bool { return bytes.Equal(ns, namespaceBz) })). + Return(da.ResultRetrieve{ + BaseResult: da.BaseResult{ + Code: da.StatusSuccess, + IDs: [][]byte{[]byte("h1")}, + Timestamp: time.Now(), + }, + Data: [][]byte{hdrBin}, + }).Once() + + mockDA.EXPECT().Retrieve(mock.Anything, uint64(1234), mock.MatchedBy(func(ns []byte) bool { return bytes.Equal(ns, namespaceDataBz) })). + Return(da.ResultRetrieve{ + BaseResult: da.BaseResult{ + Code: da.StatusSuccess, + IDs: [][]byte{[]byte("d1")}, + Timestamp: time.Now(), + }, + Data: [][]byte{dataBin}, + }).Once() r := NewDARetriever(mockDA, cm, cfg, gen, zerolog.Nop()) diff --git a/block/public.go b/block/public.go index 102a29ebef..04e20ff07c 100644 --- a/block/public.go +++ b/block/public.go @@ -1,13 +1,7 @@ package block import ( - "time" - "github.com/evstack/ev-node/block/internal/common" - "github.com/evstack/ev-node/block/internal/da" - coreda "github.com/evstack/ev-node/da" - "github.com/evstack/ev-node/pkg/config" - "github.com/rs/zerolog" ) // BlockOptions defines the options for creating block components @@ -30,21 +24,3 @@ func PrometheusMetrics(namespace string, labelsAndValues ...string) *Metrics { func NopMetrics() *Metrics { return common.NopMetrics() } - -// DAClient is the interface representing the DA client for public use. -type DAClient = da.Client - -// NewDAClient creates a new DA client with configuration -func NewDAClient( - daLayer coreda.DA, - config config.Config, - logger zerolog.Logger, -) DAClient { - return da.NewClient(da.Config{ - DA: daLayer, - Logger: logger, - DefaultTimeout: 10 * time.Second, - Namespace: config.DA.GetNamespace(), - DataNamespace: config.DA.GetDataNamespace(), - }) -} diff --git a/da/celestia/client.go b/da/celestia/client.go index cae660adf3..f01b5b51fe 100644 --- a/da/celestia/client.go +++ b/da/celestia/client.go @@ -4,6 +4,7 @@ import ( "context" "encoding/binary" "encoding/json" + "errors" "fmt" "net/http" "strings" @@ -15,6 +16,12 @@ import ( "github.com/evstack/ev-node/da" ) +// defaultRetrieveTimeout is the default timeout for DA retrieval operations +const defaultRetrieveTimeout = 10 * time.Second + +// retrieveBatchSize is the number of blobs to retrieve in a single batch +const retrieveBatchSize = 100 + // Client connects to celestia-node's blob API via JSON-RPC and implements the da.DA interface. type Client struct { logger zerolog.Logger @@ -206,69 +213,10 @@ func (c *Client) included(ctx context.Context, height uint64, namespace Namespac // DA interface implementation -// Submit submits blobs to Celestia and returns IDs. -func (c *Client) Submit(ctx context.Context, blobs []da.Blob, gasPrice float64, namespace []byte) ([]da.ID, error) { +func (c *Client) Submit(ctx context.Context, blobs []da.Blob, gasPrice float64, namespace []byte) da.ResultSubmit { return c.SubmitWithOptions(ctx, blobs, gasPrice, namespace, nil) } -// SubmitWithOptions submits blobs to Celestia with additional options. -func (c *Client) SubmitWithOptions(ctx context.Context, blobs []da.Blob, gasPrice float64, namespace []byte, options []byte) ([]da.ID, error) { - if len(blobs) == 0 { - return []da.ID{}, nil - } - - // Validate namespace - if err := ValidateNamespace(namespace); err != nil { - return nil, fmt.Errorf("invalid namespace: %w", err) - } - - // Convert blobs to Celestia format and calculate commitments locally - celestiaBlobs := make([]*Blob, len(blobs)) - for i, blob := range blobs { - // Calculate commitment locally using the same algorithm as celestia-node - commitment, err := CreateCommitment(blob, namespace) - if err != nil { - return nil, fmt.Errorf("failed to create commitment for blob %d: %w", i, err) - } - celestiaBlobs[i] = &Blob{ - Namespace: namespace, - Data: blob, - Commitment: commitment, - } - } - - // Parse submit options if provided - var opts *SubmitOptions - if len(options) > 0 { - opts = &SubmitOptions{} - if err := json.Unmarshal(options, opts); err != nil { - return nil, fmt.Errorf("failed to unmarshal submit options: %w", err) - } - opts.Fee = gasPrice - } else { - opts = &SubmitOptions{Fee: gasPrice} - } - - height, err := c.submit(ctx, celestiaBlobs, opts) - if err != nil { - if strings.Contains(err.Error(), "timeout") { - return nil, da.ErrTxTimedOut - } - if strings.Contains(err.Error(), "too large") || strings.Contains(err.Error(), "exceeds") { - return nil, da.ErrBlobSizeOverLimit - } - return nil, err - } - - // Create IDs from height and locally-computed commitments - ids := make([]da.ID, len(celestiaBlobs)) - for i, blob := range celestiaBlobs { - ids[i] = makeID(height, blob.Commitment) - } - - return ids, nil -} - // Get retrieves blobs by their IDs. func (c *Client) Get(ctx context.Context, ids []da.ID, namespace []byte) ([]da.Blob, error) { if len(ids) == 0 { @@ -315,31 +263,14 @@ func (c *Client) Get(ctx context.Context, ids []da.ID, namespace []byte) ([]da.B return result, nil } -// GetIDs returns all blob IDs at the given height. func (c *Client) GetIDs(ctx context.Context, height uint64, namespace []byte) (*da.GetIDsResult, error) { - blobs, err := c.getAll(ctx, height, []Namespace{namespace}) - if err != nil { - if strings.Contains(err.Error(), "not found") { - return nil, da.ErrBlobNotFound - } - if strings.Contains(err.Error(), "height") && strings.Contains(err.Error(), "future") { - return nil, da.ErrHeightFromFuture - } - return nil, err - } - - if len(blobs) == 0 { - return nil, da.ErrBlobNotFound - } - - ids := make([]da.ID, len(blobs)) - for i, blob := range blobs { - ids[i] = makeID(height, blob.Commitment) + result := c.Retrieve(ctx, height, namespace) + if result.Code != da.StatusSuccess { + return nil, da.StatusCodeToError(result.Code, result.Message) } - return &da.GetIDsResult{ - IDs: ids, - Timestamp: time.Now(), + IDs: result.IDs, + Timestamp: result.Timestamp, }, nil } @@ -413,3 +344,192 @@ func makeID(height uint64, commitment []byte) []byte { copy(id[8:], commitment) return id } + +func (c *Client) SubmitWithOptions(ctx context.Context, blobs []da.Blob, gasPrice float64, namespace []byte, options []byte) da.ResultSubmit { + var blobSize uint64 + for _, blob := range blobs { + blobSize += uint64(len(blob)) + } + + if len(blobs) == 0 { + return da.ResultSubmit{ + BaseResult: da.BaseResult{ + Code: da.StatusSuccess, + IDs: []da.ID{}, + Timestamp: time.Now(), + }, + } + } + + if err := ValidateNamespace(namespace); err != nil { + return da.ResultSubmit{ + BaseResult: da.BaseResult{ + Code: da.StatusError, + Message: fmt.Sprintf("invalid namespace: %s", err.Error()), + BlobSize: blobSize, + }, + } + } + + celestiaBlobs := make([]*Blob, len(blobs)) + for i, blob := range blobs { + commitment, err := CreateCommitment(blob, namespace) + if err != nil { + return da.ResultSubmit{ + BaseResult: da.BaseResult{ + Code: da.StatusError, + Message: fmt.Sprintf("failed to create commitment for blob %d: %s", i, err.Error()), + BlobSize: blobSize, + }, + } + } + celestiaBlobs[i] = &Blob{ + Namespace: namespace, + Data: blob, + Commitment: commitment, + } + } + + var opts *SubmitOptions + if len(options) > 0 { + opts = &SubmitOptions{} + if err := json.Unmarshal(options, opts); err != nil { + return da.ResultSubmit{ + BaseResult: da.BaseResult{ + Code: da.StatusError, + Message: fmt.Sprintf("failed to unmarshal submit options: %s", err.Error()), + BlobSize: blobSize, + }, + } + } + opts.Fee = gasPrice + } else { + opts = &SubmitOptions{Fee: gasPrice} + } + + height, err := c.submit(ctx, celestiaBlobs, opts) + if err != nil { + status := da.StatusError + errStr := err.Error() + + switch { + case errors.Is(err, context.Canceled): + status = da.StatusContextCanceled + case errors.Is(err, context.DeadlineExceeded): + status = da.StatusContextDeadline + case strings.Contains(errStr, "timeout"): + status = da.StatusNotIncludedInBlock + case strings.Contains(errStr, "too large") || strings.Contains(errStr, "exceeds"): + status = da.StatusTooBig + case strings.Contains(errStr, "already in mempool"): + status = da.StatusAlreadyInMempool + case strings.Contains(errStr, "incorrect account sequence"): + status = da.StatusIncorrectAccountSequence + } + + if status == da.StatusTooBig { + c.logger.Debug().Err(err).Uint64("status", uint64(status)).Msg("DA submission failed") + } else { + c.logger.Error().Err(err).Uint64("status", uint64(status)).Msg("DA submission failed") + } + + return da.ResultSubmit{ + BaseResult: da.BaseResult{ + Code: status, + Message: "failed to submit blobs: " + err.Error(), + BlobSize: blobSize, + Timestamp: time.Now(), + }, + } + } + + ids := make([]da.ID, len(celestiaBlobs)) + for i, blob := range celestiaBlobs { + ids[i] = makeID(height, blob.Commitment) + } + + c.logger.Debug().Int("num_ids", len(ids)).Uint64("height", height).Msg("DA submission successful") + return da.ResultSubmit{ + BaseResult: da.BaseResult{ + Code: da.StatusSuccess, + IDs: ids, + SubmittedCount: uint64(len(ids)), + Height: height, + BlobSize: blobSize, + Timestamp: time.Now(), + }, + } +} + +func (c *Client) Retrieve(ctx context.Context, height uint64, namespace []byte) da.ResultRetrieve { + getCtx, cancel := context.WithTimeout(ctx, defaultRetrieveTimeout) + defer cancel() + + blobs, err := c.getAll(getCtx, height, []Namespace{namespace}) + if err != nil { + errStr := err.Error() + + if strings.Contains(errStr, "not found") { + c.logger.Debug().Uint64("height", height).Msg("Blobs not found at height") + return da.ResultRetrieve{ + BaseResult: da.BaseResult{ + Code: da.StatusNotFound, + Message: da.ErrBlobNotFound.Error(), + Height: height, + Timestamp: time.Now(), + }, + } + } + if strings.Contains(errStr, "height") && strings.Contains(errStr, "future") { + c.logger.Debug().Uint64("height", height).Msg("Height is from the future") + return da.ResultRetrieve{ + BaseResult: da.BaseResult{ + Code: da.StatusHeightFromFuture, + Message: da.ErrHeightFromFuture.Error(), + Height: height, + Timestamp: time.Now(), + }, + } + } + + c.logger.Error().Uint64("height", height).Err(err).Msg("Failed to retrieve blobs") + return da.ResultRetrieve{ + BaseResult: da.BaseResult{ + Code: da.StatusError, + Message: fmt.Sprintf("failed to retrieve blobs: %s", err.Error()), + Height: height, + Timestamp: time.Now(), + }, + } + } + + if len(blobs) == 0 { + c.logger.Debug().Uint64("height", height).Msg("No blobs found at height") + return da.ResultRetrieve{ + BaseResult: da.BaseResult{ + Code: da.StatusNotFound, + Message: da.ErrBlobNotFound.Error(), + Height: height, + Timestamp: time.Now(), + }, + } + } + + ids := make([]da.ID, len(blobs)) + data := make([][]byte, len(blobs)) + for i, blob := range blobs { + ids[i] = makeID(height, blob.Commitment) + data[i] = blob.Data + } + + c.logger.Debug().Uint64("height", height).Int("num_blobs", len(blobs)).Msg("Successfully retrieved blobs") + return da.ResultRetrieve{ + BaseResult: da.BaseResult{ + Code: da.StatusSuccess, + Height: height, + IDs: ids, + Timestamp: time.Now(), + }, + Data: data, + } +} diff --git a/da/cmd/local-da/local.go b/da/cmd/local-da/local.go index 5882438e4c..8e8ff589f1 100644 --- a/da/cmd/local-da/local.go +++ b/da/cmd/local-da/local.go @@ -7,7 +7,6 @@ import ( "crypto/rand" "crypto/sha256" "encoding/binary" - "encoding/hex" "errors" "fmt" "sync" @@ -106,32 +105,16 @@ func (d *LocalDA) Get(ctx context.Context, ids []da.ID, ns []byte) ([]da.Blob, e } // GetIDs returns IDs of Blobs at given DA height. +// Delegates to Retrieve. func (d *LocalDA) GetIDs(ctx context.Context, height uint64, ns []byte) (*da.GetIDsResult, error) { - if err := validateNamespace(ns); err != nil { - d.logger.Error().Err(err).Msg("GetIDs: invalid namespace") - return nil, err - } - d.logger.Debug().Uint64("height", height).Msg("GetIDs called") - d.mu.Lock() - defer d.mu.Unlock() - - if height > d.height { - d.logger.Error().Uint64("requested", height).Uint64("current", d.height).Msg("GetIDs: height in future") - return nil, fmt.Errorf("height %d is in the future: %w", height, da.ErrHeightFromFuture) - } - - kvps, ok := d.data[height] - if !ok { - d.logger.Debug().Uint64("height", height).Msg("GetIDs: no data for height") - return nil, nil - } - - ids := make([]da.ID, len(kvps)) - for i, kv := range kvps { - ids[i] = kv.key + result := d.Retrieve(ctx, height, ns) + if result.Code != da.StatusSuccess { + return nil, da.StatusCodeToError(result.Code, result.Message) } - d.logger.Debug().Int("count", len(ids)).Msg("GetIDs successful") - return &da.GetIDsResult{IDs: ids, Timestamp: d.timestamps[height]}, nil + return &da.GetIDsResult{ + IDs: result.IDs, + Timestamp: result.Timestamp, + }, nil } // GetProofs returns inclusion Proofs for all Blobs located in DA at given height. @@ -172,64 +155,10 @@ func (d *LocalDA) Commit(ctx context.Context, blobs []da.Blob, ns []byte) ([]da. return commits, nil } -// SubmitWithOptions stores blobs in DA layer (options are ignored). -func (d *LocalDA) SubmitWithOptions(ctx context.Context, blobs []da.Blob, gasPrice float64, ns []byte, _ []byte) ([]da.ID, error) { - if err := validateNamespace(ns); err != nil { - d.logger.Error().Err(err).Msg("SubmitWithOptions: invalid namespace") - return nil, err - } - d.logger.Info().Int("numBlobs", len(blobs)).Float64("gasPrice", gasPrice).Str("namespace", hex.EncodeToString(ns)).Msg("SubmitWithOptions called") - - // Validate blob sizes before processing - for i, blob := range blobs { - if uint64(len(blob)) > d.maxBlobSize { - d.logger.Error().Int("blobIndex", i).Int("blobSize", len(blob)).Uint64("maxBlobSize", d.maxBlobSize).Msg("SubmitWithOptions: blob size exceeds limit") - return nil, da.ErrBlobSizeOverLimit - } - } - - d.mu.Lock() - defer d.mu.Unlock() - ids := make([]da.ID, len(blobs)) - d.height += 1 - d.timestamps[d.height] = time.Now() - for i, blob := range blobs { - ids[i] = append(d.nextID(), d.getHash(blob)...) - - d.data[d.height] = append(d.data[d.height], kvp{ids[i], blob}) - } - d.logger.Info().Uint64("newHeight", d.height).Int("count", len(ids)).Msg("SubmitWithOptions successful") - return ids, nil -} - -// Submit stores blobs in DA layer (options are ignored). -func (d *LocalDA) Submit(ctx context.Context, blobs []da.Blob, gasPrice float64, ns []byte) ([]da.ID, error) { - if err := validateNamespace(ns); err != nil { - d.logger.Error().Err(err).Msg("Submit: invalid namespace") - return nil, err - } - d.logger.Info().Int("numBlobs", len(blobs)).Float64("gasPrice", gasPrice).Str("namespace", string(ns)).Msg("Submit called") - - // Validate blob sizes before processing - for i, blob := range blobs { - if uint64(len(blob)) > d.maxBlobSize { - d.logger.Error().Int("blobIndex", i).Int("blobSize", len(blob)).Uint64("maxBlobSize", d.maxBlobSize).Msg("Submit: blob size exceeds limit") - return nil, da.ErrBlobSizeOverLimit - } - } - - d.mu.Lock() - defer d.mu.Unlock() - ids := make([]da.ID, len(blobs)) - d.height += 1 - d.timestamps[d.height] = time.Now() - for i, blob := range blobs { - ids[i] = append(d.nextID(), d.getHash(blob)...) - - d.data[d.height] = append(d.data[d.height], kvp{ids[i], blob}) - } - d.logger.Info().Uint64("newHeight", d.height).Int("count", len(ids)).Msg("Submit successful") - return ids, nil +// Submit stores blobs in DA layer and returns a structured result. +// Delegates to SubmitWithOptions with nil options. +func (d *LocalDA) Submit(ctx context.Context, blobs []da.Blob, gasPrice float64, ns []byte) da.ResultSubmit { + return d.SubmitWithOptions(ctx, blobs, gasPrice, ns, nil) } // Validate checks the Proofs for given IDs. @@ -279,3 +208,128 @@ func WithMaxBlobSize(maxBlobSize uint64) func(*LocalDA) *LocalDA { return da } } + +// SubmitWithOptions stores blobs in DA layer with additional options and returns a structured result. +// This is the primary implementation - Submit delegates to this method. +func (d *LocalDA) SubmitWithOptions(ctx context.Context, blobs []da.Blob, gasPrice float64, namespace []byte, options []byte) da.ResultSubmit { + // Calculate blob size upfront + var blobSize uint64 + for _, blob := range blobs { + blobSize += uint64(len(blob)) + } + + // Validate namespace + if err := validateNamespace(namespace); err != nil { + d.logger.Error().Err(err).Msg("SubmitWithResult: invalid namespace") + return da.ResultSubmit{ + BaseResult: da.BaseResult{ + Code: da.StatusError, + Message: err.Error(), + BlobSize: blobSize, + }, + } + } + + // Validate blob sizes before processing + for i, blob := range blobs { + if uint64(len(blob)) > d.maxBlobSize { + d.logger.Error().Int("blobIndex", i).Int("blobSize", len(blob)).Uint64("maxBlobSize", d.maxBlobSize).Msg("SubmitWithResult: blob size exceeds limit") + return da.ResultSubmit{ + BaseResult: da.BaseResult{ + Code: da.StatusTooBig, + Message: "failed to submit blobs: " + da.ErrBlobSizeOverLimit.Error(), + BlobSize: blobSize, + }, + } + } + } + + d.mu.Lock() + defer d.mu.Unlock() + + ids := make([]da.ID, len(blobs)) + d.height++ + d.timestamps[d.height] = time.Now() + for i, blob := range blobs { + ids[i] = append(d.nextID(), d.getHash(blob)...) + d.data[d.height] = append(d.data[d.height], kvp{ids[i], blob}) + } + + d.logger.Debug().Int("num_ids", len(ids)).Uint64("height", d.height).Msg("DA submission successful") + return da.ResultSubmit{ + BaseResult: da.BaseResult{ + Code: da.StatusSuccess, + IDs: ids, + SubmittedCount: uint64(len(ids)), + Height: d.height, + BlobSize: blobSize, + Timestamp: time.Now(), + }, + } +} + +// Retrieve retrieves all blobs at the given height and returns a structured result. +// This is the primary implementation - GetIDs delegates to this method. +func (d *LocalDA) Retrieve(ctx context.Context, height uint64, namespace []byte) da.ResultRetrieve { + // Validate namespace + if err := validateNamespace(namespace); err != nil { + d.logger.Error().Err(err).Msg("Retrieve: invalid namespace") + return da.ResultRetrieve{ + BaseResult: da.BaseResult{ + Code: da.StatusError, + Message: err.Error(), + Height: height, + Timestamp: time.Now(), + }, + } + } + + d.mu.Lock() + defer d.mu.Unlock() + + // Check height bounds + if height > d.height { + d.logger.Error().Uint64("requested", height).Uint64("current", d.height).Msg("Retrieve: height in future") + return da.ResultRetrieve{ + BaseResult: da.BaseResult{ + Code: da.StatusHeightFromFuture, + Message: da.ErrHeightFromFuture.Error(), + Height: height, + Timestamp: time.Now(), + }, + } + } + + // Get data at height + kvps, ok := d.data[height] + if !ok || len(kvps) == 0 { + d.logger.Debug().Uint64("height", height).Msg("Retrieve: no data for height") + return da.ResultRetrieve{ + BaseResult: da.BaseResult{ + Code: da.StatusNotFound, + Message: da.ErrBlobNotFound.Error(), + Height: height, + Timestamp: time.Now(), + }, + } + } + + // Extract IDs and blobs + ids := make([]da.ID, len(kvps)) + blobs := make([][]byte, len(kvps)) + for i, kv := range kvps { + ids[i] = kv.key + blobs[i] = kv.value + } + + d.logger.Debug().Uint64("height", height).Int("num_blobs", len(blobs)).Msg("Successfully retrieved blobs") + return da.ResultRetrieve{ + BaseResult: da.BaseResult{ + Code: da.StatusSuccess, + Height: height, + IDs: ids, + Timestamp: d.timestamps[height], + }, + Data: blobs, + } +} diff --git a/da/cmd/local-da/server.go b/da/cmd/local-da/server.go index b45e4ad47c..7330c2183b 100644 --- a/da/cmd/local-da/server.go +++ b/da/cmd/local-da/server.go @@ -88,13 +88,21 @@ func (s *serverInternalAPI) Validate(ctx context.Context, ids []da.ID, proofs [] // Submit implements the RPC method. func (s *serverInternalAPI) Submit(ctx context.Context, blobs []da.Blob, gasPrice float64, ns []byte) ([]da.ID, error) { s.logger.Debug().Int("num_blobs", len(blobs)).Float64("gas_price", gasPrice).Str("namespace", string(ns)).Msg("RPC server: Submit called") - return s.daImpl.Submit(ctx, blobs, gasPrice, ns) + result := s.daImpl.Submit(ctx, blobs, gasPrice, ns) + if result.Code != da.StatusSuccess { + return result.IDs, da.StatusCodeToError(result.Code, result.Message) + } + return result.IDs, nil } // SubmitWithOptions implements the RPC method. func (s *serverInternalAPI) SubmitWithOptions(ctx context.Context, blobs []da.Blob, gasPrice float64, ns []byte, options []byte) ([]da.ID, error) { s.logger.Debug().Int("num_blobs", len(blobs)).Float64("gas_price", gasPrice).Str("namespace", string(ns)).Str("options", string(options)).Msg("RPC server: SubmitWithOptions called") - return s.daImpl.SubmitWithOptions(ctx, blobs, gasPrice, ns, options) + result := s.daImpl.SubmitWithOptions(ctx, blobs, gasPrice, ns, options) + if result.Code != da.StatusSuccess { + return result.IDs, da.StatusCodeToError(result.Code, result.Message) + } + return result.IDs, nil } // blobAPI provides Celestia-compatible Blob API methods @@ -123,17 +131,13 @@ func (b *blobAPI) Submit(ctx context.Context, blobs []*Blob, opts *SubmitOptions gasPrice = opts.Fee } - _, err := b.localDA.Submit(ctx, rawBlobs, gasPrice, ns) - if err != nil { - return 0, err + result := b.localDA.Submit(ctx, rawBlobs, gasPrice, ns) + if result.Code != da.StatusSuccess { + return 0, da.StatusCodeToError(result.Code, result.Message) } - b.localDA.mu.Lock() - height := b.localDA.height - b.localDA.mu.Unlock() - - b.logger.Info().Uint64("height", height).Int("num_blobs", len(blobs)).Msg("blob.Submit successful") - return height, nil + b.logger.Info().Uint64("height", result.Height).Int("num_blobs", len(blobs)).Msg("blob.Submit successful") + return result.Height, nil } // Get retrieves a single blob by commitment at a given height (Celestia blob API compatible) diff --git a/da/da.go b/da/da.go index f414e0404a..ab55e33fa0 100644 --- a/da/da.go +++ b/da/da.go @@ -25,11 +25,14 @@ type DA interface { // Commit creates a Commitment for each given Blob. Commit(ctx context.Context, blobs []Blob, namespace []byte) ([]Commitment, error) - // Submit submits the Blobs to Data Availability layer. - Submit(ctx context.Context, blobs []Blob, gasPrice float64, namespace []byte) ([]ID, error) + // Submit submits the Blobs to Data Availability layer and returns a structured result. + Submit(ctx context.Context, blobs []Blob, gasPrice float64, namespace []byte) ResultSubmit // SubmitWithOptions submits the Blobs to Data Availability layer with additional options. - SubmitWithOptions(ctx context.Context, blobs []Blob, gasPrice float64, namespace []byte, options []byte) ([]ID, error) + SubmitWithOptions(ctx context.Context, blobs []Blob, gasPrice float64, namespace []byte, options []byte) ResultSubmit + + // Retrieve retrieves all blobs at the given height and returns a structured result. + Retrieve(ctx context.Context, height uint64, namespace []byte) ResultRetrieve // Validate validates Commitments against the corresponding Proofs. Validate(ctx context.Context, ids []ID, proofs []Proof, namespace []byte) ([]bool, error) @@ -123,6 +126,35 @@ var ( ErrContextCanceled = errors.New("context canceled") ) +// StatusCodeToError converts a StatusCode to its corresponding error. +// Returns nil for StatusSuccess or StatusUnknown. +func StatusCodeToError(code StatusCode, message string) error { + switch code { + case StatusSuccess, StatusUnknown: + return nil + case StatusNotFound: + return ErrBlobNotFound + case StatusNotIncludedInBlock: + return ErrTxTimedOut + case StatusAlreadyInMempool: + return ErrTxAlreadyInMempool + case StatusTooBig: + return ErrBlobSizeOverLimit + case StatusContextDeadline: + return ErrContextDeadline + case StatusIncorrectAccountSequence: + return ErrTxIncorrectAccountSequence + case StatusContextCanceled: + return ErrContextCanceled + case StatusHeightFromFuture: + return ErrHeightFromFuture + case StatusError: + return errors.New(message) + default: + return errors.New(message) + } +} + // Namespace constants and types const ( // NamespaceVersionIndex is the index of the namespace version in the byte slice diff --git a/da/testing.go b/da/testing.go index b730f60b7e..007d6150cd 100644 --- a/da/testing.go +++ b/da/testing.go @@ -90,33 +90,15 @@ func (d *DummyDA) Get(ctx context.Context, ids []ID, namespace []byte) ([]Blob, } // GetIDs returns IDs of all blobs at the given height. +// Delegates to Retrieve. func (d *DummyDA) GetIDs(ctx context.Context, height uint64, namespace []byte) (*GetIDsResult, error) { - d.mu.RLock() - defer d.mu.RUnlock() - - if height > d.currentHeight { - return nil, fmt.Errorf("%w: requested %d, current %d", ErrHeightFromFutureStr, height, d.currentHeight) + result := d.Retrieve(ctx, height, namespace) + if result.Code != StatusSuccess { + return nil, StatusCodeToError(result.Code, result.Message) } - - ids, exists := d.blobsByHeight[height] - if !exists { - return &GetIDsResult{ - IDs: []ID{}, - Timestamp: time.Now(), - }, nil - } - - // Filter IDs by namespace - filteredIDs := make([]ID, 0) - for _, id := range ids { - if ns, exists := d.namespaceByID[string(id)]; exists && bytes.Equal(ns, namespace) { - filteredIDs = append(filteredIDs, id) - } - } - return &GetIDsResult{ - IDs: filteredIDs, - Timestamp: d.timestampsByHeight[height], + IDs: result.IDs, + Timestamp: result.Timestamp, }, nil } @@ -150,8 +132,9 @@ func (d *DummyDA) Commit(ctx context.Context, blobs []Blob, namespace []byte) ([ return commitments, nil } -// Submit submits blobs to the DA layer. -func (d *DummyDA) Submit(ctx context.Context, blobs []Blob, gasPrice float64, namespace []byte) ([]ID, error) { +// Submit submits blobs to the DA layer and returns a structured result. +// Delegates to SubmitWithOptions with nil options. +func (d *DummyDA) Submit(ctx context.Context, blobs []Blob, gasPrice float64, namespace []byte) ResultSubmit { return d.SubmitWithOptions(ctx, blobs, gasPrice, namespace, nil) } @@ -162,36 +145,67 @@ func (d *DummyDA) SetSubmitFailure(shouldFail bool) { d.submitShouldFail = shouldFail } -// SubmitWithOptions submits blobs to the DA layer with additional options. -func (d *DummyDA) SubmitWithOptions(ctx context.Context, blobs []Blob, gasPrice float64, namespace []byte, options []byte) ([]ID, error) { +// Validate validates commitments against proofs. +func (d *DummyDA) Validate(ctx context.Context, ids []ID, proofs []Proof, namespace []byte) ([]bool, error) { + d.mu.RLock() + defer d.mu.RUnlock() + + if len(ids) != len(proofs) { + return nil, errors.New("number of IDs and proofs must match") + } + + results := make([]bool, len(ids)) + for i, id := range ids { + _, exists := d.blobs[string(id)] + results[i] = exists + } + + return results, nil +} + +// SubmitWithOptions submits blobs to the DA layer with additional options and returns a structured result. +// This is the primary implementation - Submit delegates to this method. +func (d *DummyDA) SubmitWithOptions(ctx context.Context, blobs []Blob, gasPrice float64, namespace []byte, options []byte) ResultSubmit { + // Calculate blob size upfront + var blobSize uint64 + for _, blob := range blobs { + blobSize += uint64(len(blob)) + } + d.mu.Lock() defer d.mu.Unlock() // Check if we should simulate failure if d.submitShouldFail { - return nil, errors.New("simulated DA layer failure") + return ResultSubmit{ + BaseResult: BaseResult{ + Code: StatusError, + Message: "simulated DA layer failure", + BlobSize: blobSize, + }, + } } height := d.currentHeight + 1 ids := make([]ID, 0, len(blobs)) var currentSize uint64 - for _, blob := range blobs { // Use _ instead of i + for _, blob := range blobs { blobLen := uint64(len(blob)) // Check individual blob size first if blobLen > d.maxBlobSize { - // Mimic DAClient behavior: if the first blob is too large, return error. - // Otherwise, we would have submitted the previous fitting blobs. - // Since DummyDA processes all at once, we return error if any *individual* blob is too large. - // A more complex dummy could simulate partial submission based on cumulative size. - // For now, error out if any single blob is too big. - return nil, ErrBlobSizeOverLimit // Use specific error type + return ResultSubmit{ + BaseResult: BaseResult{ + Code: StatusTooBig, + Message: "failed to submit blobs: " + ErrBlobSizeOverLimit.Error(), + BlobSize: blobSize, + }, + } } // Check cumulative batch size if currentSize+blobLen > d.maxBlobSize { // Stop processing blobs for this batch, return IDs collected so far - // d.logger.Info("DummyDA: Blob size limit reached for batch", "maxBlobSize", d.maxBlobSize, "index", i, "currentSize", currentSize, "nextBlobSize", blobLen) // Removed logger call break } currentSize += blobLen @@ -220,23 +234,80 @@ func (d *DummyDA) SubmitWithOptions(ctx context.Context, blobs []Blob, gasPrice } d.timestampsByHeight[height] = time.Now() - return ids, nil + return ResultSubmit{ + BaseResult: BaseResult{ + Code: StatusSuccess, + IDs: ids, + SubmittedCount: uint64(len(ids)), + Height: height, + BlobSize: blobSize, + Timestamp: time.Now(), + }, + } } -// Validate validates commitments against proofs. -func (d *DummyDA) Validate(ctx context.Context, ids []ID, proofs []Proof, namespace []byte) ([]bool, error) { +// Retrieve retrieves all blobs at the given height and returns a structured result. +// This is the primary implementation - GetIDs delegates to this method. +func (d *DummyDA) Retrieve(ctx context.Context, height uint64, namespace []byte) ResultRetrieve { d.mu.RLock() defer d.mu.RUnlock() - if len(ids) != len(proofs) { - return nil, errors.New("number of IDs and proofs must match") + // Check height bounds + if height > d.currentHeight { + return ResultRetrieve{ + BaseResult: BaseResult{ + Code: StatusHeightFromFuture, + Message: ErrHeightFromFuture.Error(), + Height: height, + Timestamp: time.Now(), + }, + } } - results := make([]bool, len(ids)) - for i, id := range ids { - _, exists := d.blobs[string(id)] - results[i] = exists + // Get IDs at height + ids, exists := d.blobsByHeight[height] + if !exists { + return ResultRetrieve{ + BaseResult: BaseResult{ + Code: StatusNotFound, + Message: ErrBlobNotFound.Error(), + Height: height, + Timestamp: time.Now(), + }, + } } - return results, nil + // Filter IDs by namespace and collect blobs + filteredIDs := make([]ID, 0) + blobs := make([]Blob, 0) + for _, id := range ids { + if ns, nsExists := d.namespaceByID[string(id)]; nsExists && bytes.Equal(ns, namespace) { + filteredIDs = append(filteredIDs, id) + if blob, blobExists := d.blobs[string(id)]; blobExists { + blobs = append(blobs, blob) + } + } + } + + // Handle empty result after namespace filtering + if len(filteredIDs) == 0 { + return ResultRetrieve{ + BaseResult: BaseResult{ + Code: StatusNotFound, + Message: ErrBlobNotFound.Error(), + Height: height, + Timestamp: time.Now(), + }, + } + } + + return ResultRetrieve{ + BaseResult: BaseResult{ + Code: StatusSuccess, + Height: height, + IDs: filteredIDs, + Timestamp: d.timestampsByHeight[height], + }, + Data: blobs, + } } diff --git a/da/testing_test.go b/da/testing_test.go index 9538aacc83..3e12579ef2 100644 --- a/da/testing_test.go +++ b/da/testing_test.go @@ -21,11 +21,12 @@ func TestDummyDA(t *testing.T) { []byte("test blob 1"), []byte("test blob 2"), } - ids, err := dummyDA.Submit(ctx, blobs, 0, nil) - if err != nil { - t.Fatalf("Submit failed: %v", err) + result := dummyDA.Submit(ctx, blobs, 0, nil) + if result.Code != StatusSuccess { + t.Fatalf("Submit failed: %s", result.Message) } - err = waitForFirstDAHeight(ctx, dummyDA) // Wait for height to increment + ids := result.IDs + err := waitForFirstDAHeight(ctx, dummyDA) // Wait for height to increment if err != nil { t.Fatalf("waitForFirstDAHeight failed: %v", err) } @@ -48,12 +49,12 @@ func TestDummyDA(t *testing.T) { } // Test GetIDs - result, err := dummyDA.GetIDs(ctx, 1, nil) + getIDsResult, err := dummyDA.GetIDs(ctx, 1, nil) if err != nil { t.Fatalf("GetIDs failed: %v", err) } - if len(result.IDs) != len(ids) { - t.Errorf("Expected %d IDs, got %d", len(ids), len(result.IDs)) + if len(getIDsResult.IDs) != len(ids) { + t.Errorf("Expected %d IDs, got %d", len(ids), len(getIDsResult.IDs)) } // Test Commit @@ -90,9 +91,9 @@ func TestDummyDA(t *testing.T) { // Test error case: blob size exceeds maximum largeBlob := make([]byte, 2048) // Larger than our max of 1024 - _, err = dummyDA.Submit(ctx, []Blob{largeBlob}, 0, nil) - if err == nil { - t.Errorf("Expected error for blob exceeding max size, got nil") + largeResult := dummyDA.Submit(ctx, []Blob{largeBlob}, 0, nil) + if largeResult.Code == StatusSuccess { + t.Errorf("Expected error for blob exceeding max size, got success") } } diff --git a/test/mocks/da.go b/test/mocks/da.go index afe641eacc..500085d26e 100644 --- a/test/mocks/da.go +++ b/test/mocks/da.go @@ -335,31 +335,19 @@ func (_c *MockDA_GetProofs_Call) RunAndReturn(run func(ctx context.Context, ids } // Submit provides a mock function for the type MockDA -func (_mock *MockDA) Submit(ctx context.Context, blobs []da.Blob, gasPrice float64, namespace []byte) ([]da.ID, error) { +func (_mock *MockDA) Submit(ctx context.Context, blobs []da.Blob, gasPrice float64, namespace []byte) da.ResultSubmit { ret := _mock.Called(ctx, blobs, gasPrice, namespace) if len(ret) == 0 { panic("no return value specified for Submit") } - var r0 []da.ID - var r1 error - if returnFunc, ok := ret.Get(0).(func(context.Context, []da.Blob, float64, []byte) ([]da.ID, error)); ok { + var r0 da.ResultSubmit + if returnFunc, ok := ret.Get(0).(func(context.Context, []da.Blob, float64, []byte) da.ResultSubmit); ok { return returnFunc(ctx, blobs, gasPrice, namespace) } - if returnFunc, ok := ret.Get(0).(func(context.Context, []da.Blob, float64, []byte) []da.ID); ok { - r0 = returnFunc(ctx, blobs, gasPrice, namespace) - } else { - if ret.Get(0) != nil { - r0 = ret.Get(0).([]da.ID) - } - } - if returnFunc, ok := ret.Get(1).(func(context.Context, []da.Blob, float64, []byte) error); ok { - r1 = returnFunc(ctx, blobs, gasPrice, namespace) - } else { - r1 = ret.Error(1) - } - return r0, r1 + r0 = ret.Get(0).(da.ResultSubmit) + return r0 } // MockDA_Submit_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'Submit' @@ -404,42 +392,30 @@ func (_c *MockDA_Submit_Call) Run(run func(ctx context.Context, blobs []da.Blob, return _c } -func (_c *MockDA_Submit_Call) Return(vs []da.ID, err error) *MockDA_Submit_Call { - _c.Call.Return(vs, err) +func (_c *MockDA_Submit_Call) Return(result da.ResultSubmit) *MockDA_Submit_Call { + _c.Call.Return(result) return _c } -func (_c *MockDA_Submit_Call) RunAndReturn(run func(ctx context.Context, blobs []da.Blob, gasPrice float64, namespace []byte) ([]da.ID, error)) *MockDA_Submit_Call { +func (_c *MockDA_Submit_Call) RunAndReturn(run func(ctx context.Context, blobs []da.Blob, gasPrice float64, namespace []byte) da.ResultSubmit) *MockDA_Submit_Call { _c.Call.Return(run) return _c } // SubmitWithOptions provides a mock function for the type MockDA -func (_mock *MockDA) SubmitWithOptions(ctx context.Context, blobs []da.Blob, gasPrice float64, namespace []byte, options []byte) ([]da.ID, error) { +func (_mock *MockDA) SubmitWithOptions(ctx context.Context, blobs []da.Blob, gasPrice float64, namespace []byte, options []byte) da.ResultSubmit { ret := _mock.Called(ctx, blobs, gasPrice, namespace, options) if len(ret) == 0 { panic("no return value specified for SubmitWithOptions") } - var r0 []da.ID - var r1 error - if returnFunc, ok := ret.Get(0).(func(context.Context, []da.Blob, float64, []byte, []byte) ([]da.ID, error)); ok { + var r0 da.ResultSubmit + if returnFunc, ok := ret.Get(0).(func(context.Context, []da.Blob, float64, []byte, []byte) da.ResultSubmit); ok { return returnFunc(ctx, blobs, gasPrice, namespace, options) } - if returnFunc, ok := ret.Get(0).(func(context.Context, []da.Blob, float64, []byte, []byte) []da.ID); ok { - r0 = returnFunc(ctx, blobs, gasPrice, namespace, options) - } else { - if ret.Get(0) != nil { - r0 = ret.Get(0).([]da.ID) - } - } - if returnFunc, ok := ret.Get(1).(func(context.Context, []da.Blob, float64, []byte, []byte) error); ok { - r1 = returnFunc(ctx, blobs, gasPrice, namespace, options) - } else { - r1 = ret.Error(1) - } - return r0, r1 + r0 = ret.Get(0).(da.ResultSubmit) + return r0 } // MockDA_SubmitWithOptions_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'SubmitWithOptions' @@ -490,12 +466,12 @@ func (_c *MockDA_SubmitWithOptions_Call) Run(run func(ctx context.Context, blobs return _c } -func (_c *MockDA_SubmitWithOptions_Call) Return(vs []da.ID, err error) *MockDA_SubmitWithOptions_Call { - _c.Call.Return(vs, err) +func (_c *MockDA_SubmitWithOptions_Call) Return(result da.ResultSubmit) *MockDA_SubmitWithOptions_Call { + _c.Call.Return(result) return _c } -func (_c *MockDA_SubmitWithOptions_Call) RunAndReturn(run func(ctx context.Context, blobs []da.Blob, gasPrice float64, namespace []byte, options []byte) ([]da.ID, error)) *MockDA_SubmitWithOptions_Call { +func (_c *MockDA_SubmitWithOptions_Call) RunAndReturn(run func(ctx context.Context, blobs []da.Blob, gasPrice float64, namespace []byte, options []byte) da.ResultSubmit) *MockDA_SubmitWithOptions_Call { _c.Call.Return(run) return _c } @@ -579,3 +555,41 @@ func (_c *MockDA_Validate_Call) RunAndReturn(run func(ctx context.Context, ids [ _c.Call.Return(run) return _c } + +// Retrieve provides a mock function for the type MockDA +func (_mock *MockDA) Retrieve(ctx context.Context, height uint64, namespace []byte) da.ResultRetrieve { + ret := _mock.Called(ctx, height, namespace) + + if len(ret) == 0 { + panic("no return value specified for Retrieve") + } + + var r0 da.ResultRetrieve + if returnFunc, ok := ret.Get(0).(func(context.Context, uint64, []byte) da.ResultRetrieve); ok { + return returnFunc(ctx, height, namespace) + } + r0 = ret.Get(0).(da.ResultRetrieve) + return r0 +} + +// MockDA_Retrieve_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'Retrieve' +type MockDA_Retrieve_Call struct { + *mock.Call +} + +// Retrieve is a helper method to define mock.On call +func (_e *MockDA_Expecter) Retrieve(ctx interface{}, height interface{}, namespace interface{}) *MockDA_Retrieve_Call { + return &MockDA_Retrieve_Call{Call: _e.mock.On("Retrieve", ctx, height, namespace)} +} + +func (_c *MockDA_Retrieve_Call) Run(run func(ctx context.Context, height uint64, namespace []byte)) *MockDA_Retrieve_Call { + _c.Call.Run(func(args mock.Arguments) { + run(args[0].(context.Context), args[1].(uint64), args[2].([]byte)) + }) + return _c +} + +func (_c *MockDA_Retrieve_Call) Return(result da.ResultRetrieve) *MockDA_Retrieve_Call { + _c.Call.Return(result) + return _c +} From 502deaed90296fa74148b7f0f8a9053958613b36 Mon Sep 17 00:00:00 2001 From: Randy Grok Date: Thu, 27 Nov 2025 14:38:23 +0100 Subject: [PATCH 28/35] go tidy all --- apps/evm/go.sum | 2 -- apps/grpc/go.sum | 2 -- apps/testapp/go.sum | 2 -- go.mod | 2 -- go.sum | 2 -- 5 files changed, 10 deletions(-) diff --git a/apps/evm/go.sum b/apps/evm/go.sum index 702caaa30f..ceeb4a29c2 100644 --- a/apps/evm/go.sum +++ b/apps/evm/go.sum @@ -772,8 +772,6 @@ gopkg.in/yaml.v2 v2.4.0/go.mod h1:RDklbk79AGWmwhnvt/jBztapEOGDOx6ZbXqjP6csGnQ= gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA= gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= -gotest.tools/v3 v3.5.2 h1:7koQfIKdy+I8UTetycgUqXWSDwpgv193Ka+qRsmBY8Q= -gotest.tools/v3 v3.5.2/go.mod h1:LtdLGcnqToBH83WByAAi/wiwSFCArdFIUV/xxN4pcjA= grpc.go4.org v0.0.0-20170609214715-11d0a25b4919/go.mod h1:77eQGdRu53HpSqPFJFmuJdjuHRquDANNeA4x7B8WQ9o= honnef.co/go/tools v0.0.0-20180728063816-88497007e858/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= diff --git a/apps/grpc/go.sum b/apps/grpc/go.sum index d49ddfccbd..d5a7b6f672 100644 --- a/apps/grpc/go.sum +++ b/apps/grpc/go.sum @@ -666,8 +666,6 @@ gopkg.in/yaml.v2 v2.4.0/go.mod h1:RDklbk79AGWmwhnvt/jBztapEOGDOx6ZbXqjP6csGnQ= gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA= gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= -gotest.tools/v3 v3.5.2 h1:7koQfIKdy+I8UTetycgUqXWSDwpgv193Ka+qRsmBY8Q= -gotest.tools/v3 v3.5.2/go.mod h1:LtdLGcnqToBH83WByAAi/wiwSFCArdFIUV/xxN4pcjA= grpc.go4.org v0.0.0-20170609214715-11d0a25b4919/go.mod h1:77eQGdRu53HpSqPFJFmuJdjuHRquDANNeA4x7B8WQ9o= honnef.co/go/tools v0.0.0-20180728063816-88497007e858/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= diff --git a/apps/testapp/go.sum b/apps/testapp/go.sum index 6fbd5b9d26..11b01cf828 100644 --- a/apps/testapp/go.sum +++ b/apps/testapp/go.sum @@ -665,8 +665,6 @@ gopkg.in/yaml.v2 v2.4.0/go.mod h1:RDklbk79AGWmwhnvt/jBztapEOGDOx6ZbXqjP6csGnQ= gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA= gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= -gotest.tools/v3 v3.5.2 h1:7koQfIKdy+I8UTetycgUqXWSDwpgv193Ka+qRsmBY8Q= -gotest.tools/v3 v3.5.2/go.mod h1:LtdLGcnqToBH83WByAAi/wiwSFCArdFIUV/xxN4pcjA= grpc.go4.org v0.0.0-20170609214715-11d0a25b4919/go.mod h1:77eQGdRu53HpSqPFJFmuJdjuHRquDANNeA4x7B8WQ9o= honnef.co/go/tools v0.0.0-20180728063816-88497007e858/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= diff --git a/go.mod b/go.mod index 81b07ce386..a473b3e89e 100644 --- a/go.mod +++ b/go.mod @@ -31,7 +31,6 @@ require ( golang.org/x/net v0.47.0 golang.org/x/sync v0.18.0 google.golang.org/protobuf v1.36.10 - gotest.tools/v3 v3.5.2 ) require ( @@ -55,7 +54,6 @@ require ( github.com/gogo/protobuf v1.3.2 // indirect github.com/golang/groupcache v0.0.0-20241129210726-2c02b8208cf8 // indirect github.com/google/flatbuffers v24.12.23+incompatible // indirect - github.com/google/go-cmp v0.7.0 // indirect github.com/google/gopacket v1.1.19 // indirect github.com/google/uuid v1.6.0 // indirect github.com/gopherjs/gopherjs v0.0.0-20190812055157-5d271430af9f // indirect diff --git a/go.sum b/go.sum index 538e8578c7..f0463efa4c 100644 --- a/go.sum +++ b/go.sum @@ -649,8 +649,6 @@ gopkg.in/yaml.v2 v2.4.0/go.mod h1:RDklbk79AGWmwhnvt/jBztapEOGDOx6ZbXqjP6csGnQ= gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA= gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= -gotest.tools/v3 v3.5.2 h1:7koQfIKdy+I8UTetycgUqXWSDwpgv193Ka+qRsmBY8Q= -gotest.tools/v3 v3.5.2/go.mod h1:LtdLGcnqToBH83WByAAi/wiwSFCArdFIUV/xxN4pcjA= grpc.go4.org v0.0.0-20170609214715-11d0a25b4919/go.mod h1:77eQGdRu53HpSqPFJFmuJdjuHRquDANNeA4x7B8WQ9o= honnef.co/go/tools v0.0.0-20180728063816-88497007e858/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= From 41fc2b9a87ea0c936c810c46e9ae3ccf3e2e7499 Mon Sep 17 00:00:00 2001 From: Randy Grok Date: Thu, 27 Nov 2025 15:36:09 +0100 Subject: [PATCH 29/35] rebase from merge problem --- block/internal/syncing/syncer.go | 128 +++++++++++++++++-------------- 1 file changed, 71 insertions(+), 57 deletions(-) diff --git a/block/internal/syncing/syncer.go b/block/internal/syncing/syncer.go index 953a41ea15..b809bdcc39 100644 --- a/block/internal/syncing/syncer.go +++ b/block/internal/syncing/syncer.go @@ -3,6 +3,7 @@ package syncing import ( "bytes" "context" + "encoding/binary" "errors" "fmt" "sync" @@ -52,8 +53,8 @@ type Syncer struct { // State management lastState *atomic.Pointer[types.State] - // DA state - daHeight *atomic.Uint64 + // DA retriever + daRetrieverHeight *atomic.Uint64 // P2P stores headerStore common.Broadcaster[*types.SignedHeader] @@ -64,8 +65,8 @@ type Syncer struct { errorCh chan<- error // Channel to report critical execution client failures // Handlers - daRetriever daRetriever - p2pHandler p2pHandler + daRetrieverHandler daRetriever + p2pHandler p2pHandler // Logging logger zerolog.Logger @@ -95,21 +96,21 @@ func NewSyncer( errorCh chan<- error, ) *Syncer { return &Syncer{ - store: store, - exec: exec, - da: da, - cache: cache, - metrics: metrics, - config: config, - genesis: genesis, - options: options, - headerStore: headerStore, - dataStore: dataStore, - lastState: &atomic.Pointer[types.State]{}, - daHeight: &atomic.Uint64{}, - heightInCh: make(chan common.DAHeightEvent, 1_000), - errorCh: errorCh, - logger: logger.With().Str("component", "syncer").Logger(), + store: store, + exec: exec, + da: da, + cache: cache, + metrics: metrics, + config: config, + genesis: genesis, + options: options, + headerStore: headerStore, + dataStore: dataStore, + lastState: &atomic.Pointer[types.State]{}, + daRetrieverHeight: &atomic.Uint64{}, + heightInCh: make(chan common.DAHeightEvent, 1_000), + errorCh: errorCh, + logger: logger.With().Str("component", "syncer").Logger(), } } @@ -117,13 +118,12 @@ func NewSyncer( func (s *Syncer) Start(ctx context.Context) error { s.ctx, s.cancel = context.WithCancel(ctx) - // Initialize state if err := s.initializeState(); err != nil { return fmt.Errorf("failed to initialize syncer state: %w", err) } // Initialize handlers - s.daRetriever = NewDARetriever(s.da, s.cache, s.config, s.genesis, s.logger) + s.daRetrieverHandler = NewDARetriever(s.da, s.cache, s.config, s.genesis, s.logger) s.p2pHandler = NewP2PHandler(s.headerStore.Store(), s.dataStore.Store(), s.cache, s.genesis, s.logger) if currentHeight, err := s.store.Height(s.ctx); err != nil { s.logger.Error().Err(err).Msg("failed to set initial processed height for p2p handler") @@ -131,6 +131,10 @@ func (s *Syncer) Start(ctx context.Context) error { s.p2pHandler.SetProcessedHeight(currentHeight) } + if !s.waitForGenesis() { + return nil + } + // Start main processing loop s.wg.Add(1) go func() { @@ -175,16 +179,6 @@ func (s *Syncer) SetLastState(state types.State) { s.lastState.Store(&state) } -// GetDAHeight returns the current DA height -func (s *Syncer) GetDAHeight() uint64 { - return s.daHeight.Load() -} - -// SetDAHeight updates the DA height -func (s *Syncer) SetDAHeight(height uint64) { - s.daHeight.Store(height) -} - // initializeState loads the current sync state func (s *Syncer) initializeState() error { // Load state from store @@ -216,12 +210,13 @@ func (s *Syncer) initializeState() error { } s.SetLastState(state) - // Set DA height - s.SetDAHeight(state.DAHeight) + // Set DA height to the maximum of the genesis start height, the state's DA height, the cached DA height, and the highest stored included DA height. + // This ensures we resume from the highest known DA height, even if the cache is cleared on restart. If the DA height is too high because of a user error, reset it with --evnode.clear_cache. The DA height will be back to the last highest known executed DA height for a height. + s.daRetrieverHeight.Store(max(s.genesis.DAStartHeight, s.cache.DaHeight(), state.DAHeight, s.getHighestStoredDAHeight())) s.logger.Info(). Uint64("height", state.LastBlockHeight). - Uint64("da_height", s.GetDAHeight()). + Uint64("da_height", s.daRetrieverHeight.Load()). Str("chain_id", state.ChainID). Msg("initialized syncer state") @@ -259,10 +254,6 @@ func (s *Syncer) startSyncWorkers() { func (s *Syncer) daWorkerLoop() { defer s.wg.Done() - if !s.waitForGenesis() { - return - } - s.logger.Info().Msg("starting DA worker") defer s.logger.Info().Msg("DA worker stopped") @@ -297,17 +288,13 @@ func (s *Syncer) fetchDAUntilCaughtUp() error { default: } - daHeight := s.GetDAHeight() - - // Create a new context with a timeout for the DA call - ctx, cancel := context.WithTimeout(s.ctx, 5*time.Second) - defer cancel() + daHeight := max(s.daRetrieverHeight.Load(), s.cache.DaHeight()) - events, err := s.daRetriever.RetrieveFromDA(ctx, daHeight) + events, err := s.daRetrieverHandler.RetrieveFromDA(s.ctx, daHeight) if err != nil { switch { case errors.Is(err, da.ErrBlobNotFound): - s.SetDAHeight(daHeight + 1) + s.daRetrieverHeight.Store(daHeight + 1) continue // Fetch next height immediately case errors.Is(err, da.ErrHeightFromFuture): s.logger.Debug().Err(err).Uint64("da_height", daHeight).Msg("DA is ahead of local target; backing off future height requests") @@ -332,18 +319,14 @@ func (s *Syncer) fetchDAUntilCaughtUp() error { } } - // increment DA height on successful retrieval - s.SetDAHeight(daHeight + 1) + // increment DA retrieval height on successful retrieval + s.daRetrieverHeight.Store(daHeight + 1) } } func (s *Syncer) pendingWorkerLoop() { defer s.wg.Done() - if !s.waitForGenesis() { - return - } - s.logger.Info().Msg("starting pending worker") defer s.logger.Info().Msg("pending worker stopped") @@ -363,10 +346,6 @@ func (s *Syncer) pendingWorkerLoop() { func (s *Syncer) p2pWorkerLoop() { defer s.wg.Done() - if !s.waitForGenesis() { - return - } - logger := s.logger.With().Str("worker", "p2p").Logger() logger.Info().Msg("starting P2P worker") defer logger.Info().Msg("P2P worker stopped") @@ -547,13 +526,14 @@ func (s *Syncer) trySyncNextBlock(event *common.DAHeightEvent) error { return err } - // Apply block newState, err := s.applyBlock(header.Header, data, currentState) if err != nil { return fmt.Errorf("failed to apply block: %w", err) } // Update DA height if needed + // This height is only updated when a height is processed from DA as P2P + // events do not contain DA height information if event.DaHeight > newState.DAHeight { newState.DAHeight = event.DaHeight } @@ -621,7 +601,8 @@ func (s *Syncer) applyBlock(header types.Header, data *types.Data, currentState return newState, nil } -// executeTxsWithRetry executes transactions with retry logic +// executeTxsWithRetry executes transactions with retry logic. +// NOTE: the function retries the execution client call regardless of the error. Some execution clients errors are irrecoverable, and will eventually halt the node, as expected. func (s *Syncer) executeTxsWithRetry(ctx context.Context, rawTxs [][]byte, header types.Header, currentState types.State) ([]byte, error) { for attempt := 1; attempt <= common.MaxRetriesBeforeHalt; attempt++ { newAppHash, _, err := s.exec.ExecuteTxs(ctx, rawTxs, header.Height(), header.Time(), currentState.AppHash) @@ -750,6 +731,39 @@ func (s *Syncer) sleepOrDone(duration time.Duration) bool { } } +// getHighestStoredDAHeight retrieves the highest DA height from the store by checking +// the DA heights stored for the last DA included height +// this relies on the node syncing with DA and setting included heights. +func (s *Syncer) getHighestStoredDAHeight() uint64 { + // Get the DA included height from store + daIncludedHeightBytes, err := s.store.GetMetadata(s.ctx, store.DAIncludedHeightKey) + if err != nil || len(daIncludedHeightBytes) != 8 { + return 0 + } + daIncludedHeight := binary.LittleEndian.Uint64(daIncludedHeightBytes) + if daIncludedHeight == 0 { + return 0 + } + + var highestDAHeight uint64 + + // Get header DA height for the last included height + headerKey := store.GetHeightToDAHeightHeaderKey(daIncludedHeight) + if headerBytes, err := s.store.GetMetadata(s.ctx, headerKey); err == nil && len(headerBytes) == 8 { + headerDAHeight := binary.LittleEndian.Uint64(headerBytes) + highestDAHeight = max(highestDAHeight, headerDAHeight) + } + + // Get data DA height for the last included height + dataKey := store.GetHeightToDAHeightDataKey(daIncludedHeight) + if dataBytes, err := s.store.GetMetadata(s.ctx, dataKey); err == nil && len(dataBytes) == 8 { + dataDAHeight := binary.LittleEndian.Uint64(dataBytes) + highestDAHeight = max(highestDAHeight, dataDAHeight) + } + + return highestDAHeight +} + type p2pWaitState struct { height uint64 cancel context.CancelFunc From e57eb44c08cffca9dd2ceceed31c5bc20cc7483e Mon Sep 17 00:00:00 2001 From: Randy Grok Date: Thu, 27 Nov 2025 15:53:45 +0100 Subject: [PATCH 30/35] rebase from merge problem 2 --- block/internal/syncing/da_retriever.go | 104 ++++++++++-------- block/internal/syncing/da_retriever_test.go | 2 +- block/internal/syncing/p2p_handler.go | 5 + block/internal/syncing/syncer.go | 21 +--- .../internal/syncing/syncer_benchmark_test.go | 2 +- block/internal/syncing/syncer_test.go | 6 +- 6 files changed, 77 insertions(+), 63 deletions(-) diff --git a/block/internal/syncing/da_retriever.go b/block/internal/syncing/da_retriever.go index 274d1ad33b..bffde35f81 100644 --- a/block/internal/syncing/da_retriever.go +++ b/block/internal/syncing/da_retriever.go @@ -11,21 +11,26 @@ import ( "github.com/evstack/ev-node/block/internal/cache" "github.com/evstack/ev-node/block/internal/common" - dapkg "github.com/evstack/ev-node/da" + da "github.com/evstack/ev-node/da" "github.com/evstack/ev-node/pkg/config" "github.com/evstack/ev-node/pkg/genesis" "github.com/evstack/ev-node/types" pb "github.com/evstack/ev-node/types/pb/evnode/v1" ) -// DARetriever handles DA retrieval operations for syncing -type DARetriever struct { - da dapkg.DA - cache cache.Manager +// DARetriever defines the interface for retrieving events from the DA layer +type DARetriever interface { + RetrieveFromDA(ctx context.Context, daHeight uint64) ([]common.DAHeightEvent, error) +} + +// daRetriever handles DA retrieval operations for syncing +type daRetriever struct { + da da.DA + cache cache.CacheManager genesis genesis.Genesis logger zerolog.Logger - // calculate namespaces bytes once and reuse them + // namespace bytes calculated once namespaceBz []byte namespaceDataBz []byte @@ -37,26 +42,26 @@ type DARetriever struct { // NewDARetriever creates a new DA retriever func NewDARetriever( - da dapkg.DA, - cache cache.Manager, + daClient da.DA, + cache cache.CacheManager, config config.Config, genesis genesis.Genesis, logger zerolog.Logger, -) *DARetriever { - return &DARetriever{ - da: da, +) *daRetriever { + return &daRetriever{ + da: daClient, cache: cache, genesis: genesis, logger: logger.With().Str("component", "da_retriever").Logger(), - namespaceBz: dapkg.NamespaceFromString(config.DA.GetNamespace()).Bytes(), - namespaceDataBz: dapkg.NamespaceFromString(config.DA.GetDataNamespace()).Bytes(), + namespaceBz: da.NamespaceFromString(config.DA.GetNamespace()).Bytes(), + namespaceDataBz: da.NamespaceFromString(config.DA.GetDataNamespace()).Bytes(), pendingHeaders: make(map[uint64]*types.SignedHeader), pendingData: make(map[uint64]*types.Data), } } // RetrieveFromDA retrieves blocks from the specified DA height and returns height events -func (r *DARetriever) RetrieveFromDA(ctx context.Context, daHeight uint64) ([]common.DAHeightEvent, error) { +func (r *daRetriever) RetrieveFromDA(ctx context.Context, daHeight uint64) ([]common.DAHeightEvent, error) { r.logger.Debug().Uint64("da_height", daHeight).Msg("retrieving from DA") blobsResp, err := r.fetchBlobs(ctx, daHeight) if err != nil { @@ -72,9 +77,9 @@ func (r *DARetriever) RetrieveFromDA(ctx context.Context, daHeight uint64) ([]co return r.processBlobs(ctx, blobsResp.Data, daHeight), nil } -// fetchBlobs retrieves blobs from the DA layer -func (r *DARetriever) fetchBlobs(ctx context.Context, daHeight uint64) (dapkg.ResultRetrieve, error) { - // Retrieve from both namespaces +// fetchBlobs retrieves blobs from both header and data namespaces +func (r *daRetriever) fetchBlobs(ctx context.Context, daHeight uint64) (da.ResultRetrieve, error) { + // Retrieve from header namespace headerRes := r.da.Retrieve(ctx, daHeight, r.namespaceBz) // If namespaces are the same, return header result @@ -87,31 +92,31 @@ func (r *DARetriever) fetchBlobs(ctx context.Context, daHeight uint64) (dapkg.Re // Validate responses headerErr := r.validateBlobResponse(headerRes, daHeight) // ignoring error not found, as data can have data - if headerErr != nil && !errors.Is(headerErr, dapkg.ErrBlobNotFound) { + if headerErr != nil && !errors.Is(headerErr, da.ErrBlobNotFound) { return headerRes, headerErr } dataErr := r.validateBlobResponse(dataRes, daHeight) // ignoring error not found, as header can have data - if dataErr != nil && !errors.Is(dataErr, dapkg.ErrBlobNotFound) { + if dataErr != nil && !errors.Is(dataErr, da.ErrBlobNotFound) { return dataRes, dataErr } // Combine successful results - combinedResult := dapkg.ResultRetrieve{ - BaseResult: dapkg.BaseResult{ - Code: dapkg.StatusSuccess, + combinedResult := da.ResultRetrieve{ + BaseResult: da.BaseResult{ + Code: da.StatusSuccess, Height: daHeight, }, Data: make([][]byte, 0), } - if headerRes.Code == dapkg.StatusSuccess { + if headerRes.Code == da.StatusSuccess { combinedResult.Data = append(combinedResult.Data, headerRes.Data...) combinedResult.IDs = append(combinedResult.IDs, headerRes.IDs...) } - if dataRes.Code == dapkg.StatusSuccess { + if dataRes.Code == da.StatusSuccess { combinedResult.Data = append(combinedResult.Data, dataRes.Data...) combinedResult.IDs = append(combinedResult.IDs, dataRes.IDs...) } @@ -119,25 +124,25 @@ func (r *DARetriever) fetchBlobs(ctx context.Context, daHeight uint64) (dapkg.Re // Re-throw error not found if both were not found. if len(combinedResult.Data) == 0 && len(combinedResult.IDs) == 0 { r.logger.Debug().Uint64("da_height", daHeight).Msg("no blob data found") - combinedResult.Code = dapkg.StatusNotFound - combinedResult.Message = dapkg.ErrBlobNotFound.Error() - return combinedResult, dapkg.ErrBlobNotFound + combinedResult.Code = da.StatusNotFound + combinedResult.Message = da.ErrBlobNotFound.Error() + return combinedResult, da.ErrBlobNotFound } return combinedResult, nil } // validateBlobResponse validates a blob response from DA layer -// those are the only error code returned by dapkg.RetrieveWithHelpers -func (r *DARetriever) validateBlobResponse(res dapkg.ResultRetrieve, daHeight uint64) error { +// those are the only error code returned by da.RetrieveWithHelpers +func (r *daRetriever) validateBlobResponse(res da.ResultRetrieve, daHeight uint64) error { switch res.Code { - case dapkg.StatusError: + case da.StatusError: return fmt.Errorf("DA retrieval failed: %s", res.Message) - case dapkg.StatusHeightFromFuture: - return fmt.Errorf("%w: height from future", dapkg.ErrHeightFromFuture) - case dapkg.StatusNotFound: - return fmt.Errorf("%w: blob not found", dapkg.ErrBlobNotFound) - case dapkg.StatusSuccess: + case da.StatusHeightFromFuture: + return fmt.Errorf("%w: height from future", da.ErrHeightFromFuture) + case da.StatusNotFound: + return fmt.Errorf("%w: blob not found", da.ErrBlobNotFound) + case da.StatusSuccess: r.logger.Debug().Uint64("da_height", daHeight).Msg("successfully retrieved from DA") return nil default: @@ -146,7 +151,7 @@ func (r *DARetriever) validateBlobResponse(res dapkg.ResultRetrieve, daHeight ui } // processBlobs processes retrieved blobs to extract headers and data and returns height events -func (r *DARetriever) processBlobs(ctx context.Context, blobs [][]byte, daHeight uint64) []common.DAHeightEvent { +func (r *daRetriever) processBlobs(ctx context.Context, blobs [][]byte, daHeight uint64) []common.DAHeightEvent { // Decode all blobs for _, bz := range blobs { if len(bz) == 0 { @@ -207,15 +212,28 @@ func (r *DARetriever) processBlobs(ctx context.Context, blobs [][]byte, daHeight } events = append(events, event) + } - r.logger.Info().Uint64("height", height).Uint64("da_height", daHeight).Msg("processed block from DA") + if len(events) > 0 { + startHeight := events[0].Header.Height() + endHeight := events[0].Header.Height() + for _, event := range events { + h := event.Header.Height() + if h < startHeight { + startHeight = h + } + if h > endHeight { + endHeight = h + } + } + r.logger.Info().Uint64("da_height", daHeight).Uint64("start_height", startHeight).Uint64("end_height", endHeight).Msg("processed blocks from DA") } return events } // tryDecodeHeader attempts to decode a blob as a header -func (r *DARetriever) tryDecodeHeader(bz []byte, daHeight uint64) *types.SignedHeader { +func (r *daRetriever) tryDecodeHeader(bz []byte, daHeight uint64) *types.SignedHeader { header := new(types.SignedHeader) var headerPb pb.SignedHeader @@ -245,7 +263,7 @@ func (r *DARetriever) tryDecodeHeader(bz []byte, daHeight uint64) *types.SignedH headerHash := header.Hash().String() r.cache.SetHeaderDAIncluded(headerHash, daHeight, header.Height()) - r.logger.Info(). + r.logger.Debug(). Str("header_hash", headerHash). Uint64("da_height", daHeight). Uint64("height", header.Height()). @@ -255,7 +273,7 @@ func (r *DARetriever) tryDecodeHeader(bz []byte, daHeight uint64) *types.SignedH } // tryDecodeData attempts to decode a blob as signed data -func (r *DARetriever) tryDecodeData(bz []byte, daHeight uint64) *types.Data { +func (r *daRetriever) tryDecodeData(bz []byte, daHeight uint64) *types.Data { var signedData types.SignedData if err := signedData.UnmarshalBinary(bz); err != nil { return nil @@ -276,7 +294,7 @@ func (r *DARetriever) tryDecodeData(bz []byte, daHeight uint64) *types.Data { dataHash := signedData.Data.DACommitment().String() r.cache.SetDataDAIncluded(dataHash, daHeight, signedData.Height()) - r.logger.Info(). + r.logger.Debug(). Str("data_hash", dataHash). Uint64("da_height", daHeight). Uint64("height", signedData.Height()). @@ -286,7 +304,7 @@ func (r *DARetriever) tryDecodeData(bz []byte, daHeight uint64) *types.Data { } // assertExpectedProposer validates the proposer address -func (r *DARetriever) assertExpectedProposer(proposerAddr []byte) error { +func (r *daRetriever) assertExpectedProposer(proposerAddr []byte) error { if string(proposerAddr) != string(r.genesis.ProposerAddress) { return fmt.Errorf("unexpected proposer: got %x, expected %x", proposerAddr, r.genesis.ProposerAddress) @@ -295,7 +313,7 @@ func (r *DARetriever) assertExpectedProposer(proposerAddr []byte) error { } // assertValidSignedData validates signed data using the configured signature provider -func (r *DARetriever) assertValidSignedData(signedData *types.SignedData) error { +func (r *daRetriever) assertValidSignedData(signedData *types.SignedData) error { if signedData == nil || signedData.Txs == nil { return errors.New("empty signed data") } diff --git a/block/internal/syncing/da_retriever_test.go b/block/internal/syncing/da_retriever_test.go index 4e9afed466..1bd8d87e86 100644 --- a/block/internal/syncing/da_retriever_test.go +++ b/block/internal/syncing/da_retriever_test.go @@ -270,7 +270,7 @@ func TestDARetriever_tryDecodeData_InvalidSignatureOrProposer(t *testing.T) { } func TestDARetriever_validateBlobResponse(t *testing.T) { - r := &DARetriever{logger: zerolog.Nop()} + r := &daRetriever{logger: zerolog.Nop()} // StatusSuccess -> nil err := r.validateBlobResponse(da.ResultRetrieve{BaseResult: da.BaseResult{Code: da.StatusSuccess}}, 1) assert.NoError(t, err) diff --git a/block/internal/syncing/p2p_handler.go b/block/internal/syncing/p2p_handler.go index 6e036aa76f..d8c10bc4c3 100644 --- a/block/internal/syncing/p2p_handler.go +++ b/block/internal/syncing/p2p_handler.go @@ -15,6 +15,11 @@ import ( "github.com/evstack/ev-node/types" ) +type p2pHandler interface { + ProcessHeight(ctx context.Context, height uint64, heightInCh chan<- common.DAHeightEvent) error + SetProcessedHeight(height uint64) +} + // P2PHandler coordinates block retrieval from P2P stores for the syncer. // It waits for both header and data to be available at a given height, // validates their consistency, and emits events to the syncer for processing. diff --git a/block/internal/syncing/syncer.go b/block/internal/syncing/syncer.go index b809bdcc39..b51f0304db 100644 --- a/block/internal/syncing/syncer.go +++ b/block/internal/syncing/syncer.go @@ -25,15 +25,6 @@ import ( "github.com/evstack/ev-node/types" ) -type daRetriever interface { - RetrieveFromDA(ctx context.Context, daHeight uint64) ([]common.DAHeightEvent, error) -} - -type p2pHandler interface { - ProcessHeight(ctx context.Context, height uint64, heightInCh chan<- common.DAHeightEvent) error - SetProcessedHeight(height uint64) -} - // Syncer handles block synchronization from DA and P2P sources. type Syncer struct { // Core components @@ -42,7 +33,7 @@ type Syncer struct { da da.DA // Shared components - cache cache.Manager + cache cache.CacheManager metrics *common.Metrics // Configuration @@ -65,8 +56,8 @@ type Syncer struct { errorCh chan<- error // Channel to report critical execution client failures // Handlers - daRetrieverHandler daRetriever - p2pHandler p2pHandler + daRetriever DARetriever + p2pHandler p2pHandler // Logging logger zerolog.Logger @@ -85,7 +76,7 @@ func NewSyncer( store store.Store, exec coreexecutor.Executor, da da.DA, - cache cache.Manager, + cache cache.CacheManager, metrics *common.Metrics, config config.Config, genesis genesis.Genesis, @@ -123,7 +114,7 @@ func (s *Syncer) Start(ctx context.Context) error { } // Initialize handlers - s.daRetrieverHandler = NewDARetriever(s.da, s.cache, s.config, s.genesis, s.logger) + s.daRetriever = NewDARetriever(s.da, s.cache, s.config, s.genesis, s.logger) s.p2pHandler = NewP2PHandler(s.headerStore.Store(), s.dataStore.Store(), s.cache, s.genesis, s.logger) if currentHeight, err := s.store.Height(s.ctx); err != nil { s.logger.Error().Err(err).Msg("failed to set initial processed height for p2p handler") @@ -290,7 +281,7 @@ func (s *Syncer) fetchDAUntilCaughtUp() error { daHeight := max(s.daRetrieverHeight.Load(), s.cache.DaHeight()) - events, err := s.daRetrieverHandler.RetrieveFromDA(s.ctx, daHeight) + events, err := s.daRetriever.RetrieveFromDA(s.ctx, daHeight) if err != nil { switch { case errors.Is(err, da.ErrBlobNotFound): diff --git a/block/internal/syncing/syncer_benchmark_test.go b/block/internal/syncing/syncer_benchmark_test.go index 12b60c11c2..bb714d4faa 100644 --- a/block/internal/syncing/syncer_benchmark_test.go +++ b/block/internal/syncing/syncer_benchmark_test.go @@ -58,7 +58,7 @@ func BenchmarkSyncerIO(b *testing.B) { } require.Len(b, fixt.s.heightInCh, 0) - assert.Equal(b, spec.heights+daHeightOffset, fixt.s.daHeight.Load()) + assert.Equal(b, spec.heights+daHeightOffset, fixt.s.daRetrieverHeight.Load()) gotStoreHeight, err := fixt.s.store.Height(b.Context()) require.NoError(b, err) assert.Equal(b, spec.heights, gotStoreHeight) diff --git a/block/internal/syncing/syncer_test.go b/block/internal/syncing/syncer_test.go index 4bc73f6bba..bc256c2f7f 100644 --- a/block/internal/syncing/syncer_test.go +++ b/block/internal/syncing/syncer_test.go @@ -419,7 +419,7 @@ func TestSyncLoopPersistState(t *testing.T) { requireEmptyChan(t, errorCh) t.Log("sync workers on instance1 completed") - require.Equal(t, myFutureDAHeight, syncerInst1.daHeight.Load()) + require.Equal(t, myFutureDAHeight, syncerInst1.daRetrieverHeight.Load()) // wait for all events consumed require.NoError(t, cacheMgr.SaveToDisk()) @@ -469,7 +469,7 @@ func TestSyncLoopPersistState(t *testing.T) { Run(func(arg mock.Arguments) { cancel() // retrieve last one again - assert.Equal(t, syncerInst2.daHeight.Load(), arg.Get(1).(uint64)) + assert.Equal(t, syncerInst2.daRetrieverHeight.Load(), arg.Get(1).(uint64)) }). Return(nil, nil) @@ -617,7 +617,7 @@ func TestSyncer_InitializeState_CallsReplayer(t *testing.T) { exec: mockExec, genesis: gen, lastState: &atomic.Pointer[types.State]{}, - daHeight: &atomic.Uint64{}, + daRetrieverHeight: &atomic.Uint64{}, logger: zerolog.Nop(), ctx: context.Background(), cache: cm, From 8eb1b8ec1fd84a3748834a1e784153f7f235e6cb Mon Sep 17 00:00:00 2001 From: Randy Grok Date: Thu, 27 Nov 2025 16:06:49 +0100 Subject: [PATCH 31/35] rebase from merge problem 3 --- block/internal/syncing/da_retriever_test.go | 139 ++++++++---------- block/internal/syncing/syncer_backoff_test.go | 4 +- .../internal/syncing/syncer_benchmark_test.go | 6 +- block/internal/syncing/syncer_test.go | 90 +++++++++--- 4 files changed, 135 insertions(+), 104 deletions(-) diff --git a/block/internal/syncing/da_retriever_test.go b/block/internal/syncing/da_retriever_test.go index 1bd8d87e86..ee22a52055 100644 --- a/block/internal/syncing/da_retriever_test.go +++ b/block/internal/syncing/da_retriever_test.go @@ -7,8 +7,6 @@ import ( "testing" "time" - "github.com/ipfs/go-datastore" - dssync "github.com/ipfs/go-datastore/sync" "github.com/libp2p/go-libp2p/core/crypto" "github.com/rs/zerolog" "github.com/stretchr/testify/assert" @@ -21,14 +19,33 @@ import ( "github.com/evstack/ev-node/pkg/config" "github.com/evstack/ev-node/pkg/genesis" signerpkg "github.com/evstack/ev-node/pkg/signer" - "github.com/evstack/ev-node/pkg/store" testmocks "github.com/evstack/ev-node/test/mocks" "github.com/evstack/ev-node/types" ) +// newTestDARetriever creates a DA retriever for testing with the given DA implementation +func newTestDARetriever(t *testing.T, mockDA da.DA, cfg config.Config, gen genesis.Genesis) *daRetriever { + t.Helper() + if cfg.DA.Namespace == "" { + cfg.DA.Namespace = "test-ns" + } + if cfg.DA.DataNamespace == "" { + cfg.DA.DataNamespace = "test-data-ns" + } + + cm, err := cache.NewCacheManager(cfg, zerolog.Nop()) + require.NoError(t, err) + + return NewDARetriever(mockDA, cm, cfg, gen, zerolog.Nop()) +} + // makeSignedDataBytes builds SignedData containing the provided Data and returns its binary encoding func makeSignedDataBytes(t *testing.T, chainID string, height uint64, proposer []byte, pub crypto.PubKey, signer signerpkg.Signer, txs int) ([]byte, *types.SignedData) { - d := &types.Data{Metadata: &types.Metadata{ChainID: chainID, Height: height, Time: uint64(time.Now().UnixNano())}} + return makeSignedDataBytesWithTime(t, chainID, height, proposer, pub, signer, txs, uint64(time.Now().UnixNano())) +} + +func makeSignedDataBytesWithTime(t *testing.T, chainID string, height uint64, proposer []byte, pub crypto.PubKey, signer signerpkg.Signer, txs int, timestamp uint64) ([]byte, *types.SignedData) { + d := &types.Data{Metadata: &types.Metadata{ChainID: chainID, Height: height, Time: timestamp}} if txs > 0 { d.Txs = make(types.Txs, txs) for i := 0; i < txs; i++ { @@ -37,8 +54,7 @@ func makeSignedDataBytes(t *testing.T, chainID string, height uint64, proposer [ } // For DA SignedData, sign the Data payload bytes (matches DA submission logic) - payload, err := d.MarshalBinary() - require.NoError(t, err) + payload, _ := d.MarshalBinary() sig, err := signer.Sign(payload) require.NoError(t, err) sd := &types.SignedData{Data: *d, Signature: sig, Signer: types.Signer{PubKey: pub, Address: proposer}} @@ -48,11 +64,6 @@ func makeSignedDataBytes(t *testing.T, chainID string, height uint64, proposer [ } func TestDARetriever_RetrieveFromDA_Invalid(t *testing.T) { - ds := dssync.MutexWrap(datastore.NewMapDatastore()) - st := store.New(ds) - cm, err := cache.NewManager(config.DefaultConfig(), st, zerolog.Nop()) - assert.NoError(t, err) - mockDA := testmocks.NewMockDA(t) mockDA.EXPECT().Retrieve(mock.Anything, mock.Anything, mock.Anything). @@ -63,18 +74,13 @@ func TestDARetriever_RetrieveFromDA_Invalid(t *testing.T) { }, }).Maybe() - r := NewDARetriever(mockDA, cm, config.DefaultConfig(), genesis.Genesis{}, zerolog.Nop()) + r := newTestDARetriever(t, mockDA, config.DefaultConfig(), genesis.Genesis{}) events, err := r.RetrieveFromDA(context.Background(), 42) assert.Error(t, err) assert.Len(t, events, 0) } func TestDARetriever_RetrieveFromDA_NotFound(t *testing.T) { - ds := dssync.MutexWrap(datastore.NewMapDatastore()) - st := store.New(ds) - cm, err := cache.NewManager(config.DefaultConfig(), st, zerolog.Nop()) - assert.NoError(t, err) - mockDA := testmocks.NewMockDA(t) // Retrieve returns StatusNotFound @@ -86,19 +92,15 @@ func TestDARetriever_RetrieveFromDA_NotFound(t *testing.T) { }, }).Maybe() - r := NewDARetriever(mockDA, cm, config.DefaultConfig(), genesis.Genesis{}, zerolog.Nop()) + r := newTestDARetriever(t, mockDA, config.DefaultConfig(), genesis.Genesis{}) events, err := r.RetrieveFromDA(context.Background(), 42) assert.True(t, errors.Is(err, da.ErrBlobNotFound)) assert.Len(t, events, 0) } func TestDARetriever_RetrieveFromDA_HeightFromFuture(t *testing.T) { - ds := dssync.MutexWrap(datastore.NewMapDatastore()) - st := store.New(ds) - cm, err := cache.NewManager(config.DefaultConfig(), st, zerolog.Nop()) - require.NoError(t, err) - mockDA := testmocks.NewMockDA(t) + // Retrieve returns StatusHeightFromFuture mockDA.EXPECT().Retrieve(mock.Anything, mock.Anything, mock.Anything). Return(da.ResultRetrieve{ @@ -108,7 +110,7 @@ func TestDARetriever_RetrieveFromDA_HeightFromFuture(t *testing.T) { }, }).Maybe() - r := NewDARetriever(mockDA, cm, config.DefaultConfig(), genesis.Genesis{}, zerolog.Nop()) + r := newTestDARetriever(t, mockDA, config.DefaultConfig(), genesis.Genesis{}) events, derr := r.RetrieveFromDA(context.Background(), 1000) assert.Error(t, derr) assert.True(t, errors.Is(derr, da.ErrHeightFromFuture)) @@ -116,51 +118,53 @@ func TestDARetriever_RetrieveFromDA_HeightFromFuture(t *testing.T) { } func TestDARetriever_RetrieveFromDA_Timeout(t *testing.T) { - ds := dssync.MutexWrap(datastore.NewMapDatastore()) - st := store.New(ds) - cm, err := cache.NewManager(config.DefaultConfig(), st, zerolog.Nop()) - require.NoError(t, err) + t.Skip("Skipping flaky timeout test - timing is now controlled by DA client") mockDA := testmocks.NewMockDA(t) - // Mock Retrieve to return timeout error + // Mock Retrieve to hang longer than the timeout mockDA.EXPECT().Retrieve(mock.Anything, mock.Anything, mock.Anything). + Run(func(ctx context.Context, height uint64, namespace []byte) { + <-ctx.Done() + }). Return(da.ResultRetrieve{ BaseResult: da.BaseResult{ Code: da.StatusError, - Message: "failed to get IDs: context deadline exceeded", + Message: context.DeadlineExceeded.Error(), }, }).Maybe() - r := NewDARetriever(mockDA, cm, config.DefaultConfig(), genesis.Genesis{}, zerolog.Nop()) + r := newTestDARetriever(t, mockDA, config.DefaultConfig(), genesis.Genesis{}) + start := time.Now() events, err := r.RetrieveFromDA(context.Background(), 42) + duration := time.Since(start) // Verify error is returned and contains deadline exceeded information require.Error(t, err) assert.Contains(t, err.Error(), "DA retrieval failed") assert.Contains(t, err.Error(), "context deadline exceeded") assert.Len(t, events, 0) + + // Verify timeout occurred approximately at expected time (with some tolerance) + // DA client has a 30-second default timeout + assert.Greater(t, duration, 29*time.Second, "should timeout after approximately 30 seconds") + assert.Less(t, duration, 35*time.Second, "should not take much longer than timeout") } func TestDARetriever_RetrieveFromDA_TimeoutFast(t *testing.T) { - ds := dssync.MutexWrap(datastore.NewMapDatastore()) - st := store.New(ds) - cm, err := cache.NewManager(config.DefaultConfig(), st, zerolog.Nop()) - require.NoError(t, err) - mockDA := testmocks.NewMockDA(t) - // Mock Retrieve to immediately return context deadline exceeded + // Mock Retrieve to return error with context deadline exceeded mockDA.EXPECT().Retrieve(mock.Anything, mock.Anything, mock.Anything). Return(da.ResultRetrieve{ BaseResult: da.BaseResult{ Code: da.StatusError, - Message: "failed to get IDs: context deadline exceeded", + Message: context.DeadlineExceeded.Error(), }, }).Maybe() - r := NewDARetriever(mockDA, cm, config.DefaultConfig(), genesis.Genesis{}, zerolog.Nop()) + r := newTestDARetriever(t, mockDA, config.DefaultConfig(), genesis.Genesis{}) events, err := r.RetrieveFromDA(context.Background(), 42) @@ -172,15 +176,11 @@ func TestDARetriever_RetrieveFromDA_TimeoutFast(t *testing.T) { } func TestDARetriever_ProcessBlobs_HeaderAndData_Success(t *testing.T) { - ds := dssync.MutexWrap(datastore.NewMapDatastore()) - st := store.New(ds) - cm, err := cache.NewManager(config.DefaultConfig(), st, zerolog.Nop()) - require.NoError(t, err) addr, pub, signer := buildSyncTestSigner(t) gen := genesis.Genesis{ChainID: "tchain", InitialHeight: 1, StartTime: time.Now().Add(-time.Second), ProposerAddress: addr} - r := NewDARetriever(nil, cm, config.DefaultConfig(), gen, zerolog.Nop()) + r := newTestDARetriever(t, nil, config.DefaultConfig(), gen) dataBin, data := makeSignedDataBytes(t, gen.ChainID, 2, addr, pub, signer, 2) hdrBin, _ := makeSignedHeaderBytes(t, gen.ChainID, 2, addr, pub, signer, nil, &data.Data, nil) @@ -201,14 +201,10 @@ func TestDARetriever_ProcessBlobs_HeaderAndData_Success(t *testing.T) { } func TestDARetriever_ProcessBlobs_HeaderOnly_EmptyDataExpected(t *testing.T) { - ds := dssync.MutexWrap(datastore.NewMapDatastore()) - st := store.New(ds) - cm, err := cache.NewManager(config.DefaultConfig(), st, zerolog.Nop()) - require.NoError(t, err) addr, pub, signer := buildSyncTestSigner(t) gen := genesis.Genesis{ChainID: "tchain", InitialHeight: 1, StartTime: time.Now().Add(-time.Second), ProposerAddress: addr} - r := NewDARetriever(nil, cm, config.DefaultConfig(), gen, zerolog.Nop()) + r := newTestDARetriever(t, nil, config.DefaultConfig(), gen) // Header with no data hash present should trigger empty data creation (per current logic) hb, _ := makeSignedHeaderBytes(t, gen.ChainID, 3, addr, pub, signer, nil, nil, nil) @@ -229,14 +225,10 @@ func TestDARetriever_ProcessBlobs_HeaderOnly_EmptyDataExpected(t *testing.T) { } func TestDARetriever_TryDecodeHeaderAndData_Basic(t *testing.T) { - ds := dssync.MutexWrap(datastore.NewMapDatastore()) - st := store.New(ds) - cm, err := cache.NewManager(config.DefaultConfig(), st, zerolog.Nop()) - require.NoError(t, err) addr, pub, signer := buildSyncTestSigner(t) gen := genesis.Genesis{ChainID: "tchain", InitialHeight: 1, StartTime: time.Now().Add(-time.Second), ProposerAddress: addr} - r := NewDARetriever(nil, cm, config.DefaultConfig(), gen, zerolog.Nop()) + r := newTestDARetriever(t, nil, config.DefaultConfig(), gen) hb, sh := makeSignedHeaderBytes(t, gen.ChainID, 5, addr, pub, signer, nil, nil, nil) gotH := r.tryDecodeHeader(hb, 123) @@ -254,15 +246,11 @@ func TestDARetriever_TryDecodeHeaderAndData_Basic(t *testing.T) { } func TestDARetriever_tryDecodeData_InvalidSignatureOrProposer(t *testing.T) { - ds := dssync.MutexWrap(datastore.NewMapDatastore()) - st := store.New(ds) - cm, err := cache.NewManager(config.DefaultConfig(), st, zerolog.Nop()) - require.NoError(t, err) goodAddr, pub, signer := buildSyncTestSigner(t) badAddr := []byte("not-the-proposer") gen := genesis.Genesis{ChainID: "tchain", InitialHeight: 1, StartTime: time.Now().Add(-time.Second), ProposerAddress: badAddr} - r := NewDARetriever(nil, cm, config.DefaultConfig(), gen, zerolog.Nop()) + r := newTestDARetriever(t, nil, config.DefaultConfig(), gen) // Signed data is made by goodAddr; retriever expects badAddr -> should be rejected db, _ := makeSignedDataBytes(t, gen.ChainID, 7, goodAddr, pub, signer, 1) @@ -284,10 +272,6 @@ func TestDARetriever_validateBlobResponse(t *testing.T) { } func TestDARetriever_RetrieveFromDA_TwoNamespaces_Success(t *testing.T) { - ds := dssync.MutexWrap(datastore.NewMapDatastore()) - st := store.New(ds) - cm, err := cache.NewManager(config.DefaultConfig(), st, zerolog.Nop()) - require.NoError(t, err) addr, pub, signer := buildSyncTestSigner(t) gen := genesis.Genesis{ChainID: "tchain", InitialHeight: 1, StartTime: time.Now().Add(-time.Second), ProposerAddress: addr} @@ -304,28 +288,29 @@ func TestDARetriever_RetrieveFromDA_TwoNamespaces_Success(t *testing.T) { namespaceDataBz := da.NamespaceFromString(cfg.DA.GetDataNamespace()).Bytes() mockDA := testmocks.NewMockDA(t) - // Expect Retrieve for both namespaces + // Expect Retrieve for header namespace mockDA.EXPECT().Retrieve(mock.Anything, uint64(1234), mock.MatchedBy(func(ns []byte) bool { return bytes.Equal(ns, namespaceBz) })). Return(da.ResultRetrieve{ BaseResult: da.BaseResult{ - Code: da.StatusSuccess, - IDs: [][]byte{[]byte("h1")}, - Timestamp: time.Now(), + Code: da.StatusSuccess, + Height: 1234, + IDs: [][]byte{[]byte("h1")}, }, Data: [][]byte{hdrBin}, }).Once() + // Expect Retrieve for data namespace mockDA.EXPECT().Retrieve(mock.Anything, uint64(1234), mock.MatchedBy(func(ns []byte) bool { return bytes.Equal(ns, namespaceDataBz) })). Return(da.ResultRetrieve{ BaseResult: da.BaseResult{ - Code: da.StatusSuccess, - IDs: [][]byte{[]byte("d1")}, - Timestamp: time.Now(), + Code: da.StatusSuccess, + Height: 1234, + IDs: [][]byte{[]byte("d1")}, }, Data: [][]byte{dataBin}, }).Once() - r := NewDARetriever(mockDA, cm, cfg, gen, zerolog.Nop()) + r := newTestDARetriever(t, mockDA, cfg, gen) events, derr := r.RetrieveFromDA(context.Background(), 1234) require.NoError(t, derr) @@ -335,15 +320,11 @@ func TestDARetriever_RetrieveFromDA_TwoNamespaces_Success(t *testing.T) { } func TestDARetriever_ProcessBlobs_CrossDAHeightMatching(t *testing.T) { - ds := dssync.MutexWrap(datastore.NewMapDatastore()) - st := store.New(ds) - cm, err := cache.NewManager(config.DefaultConfig(), st, zerolog.Nop()) - require.NoError(t, err) addr, pub, signer := buildSyncTestSigner(t) gen := genesis.Genesis{ChainID: "tchain", InitialHeight: 1, StartTime: time.Now().Add(-time.Second), ProposerAddress: addr} - r := NewDARetriever(nil, cm, config.DefaultConfig(), gen, zerolog.Nop()) + r := newTestDARetriever(t, nil, config.DefaultConfig(), gen) // Create header and data for the same block height but from different DA heights dataBin, data := makeSignedDataBytes(t, gen.ChainID, 5, addr, pub, signer, 2) @@ -371,15 +352,11 @@ func TestDARetriever_ProcessBlobs_CrossDAHeightMatching(t *testing.T) { } func TestDARetriever_ProcessBlobs_MultipleHeadersCrossDAHeightMatching(t *testing.T) { - ds := dssync.MutexWrap(datastore.NewMapDatastore()) - st := store.New(ds) - cm, err := cache.NewManager(config.DefaultConfig(), st, zerolog.Nop()) - require.NoError(t, err) addr, pub, signer := buildSyncTestSigner(t) gen := genesis.Genesis{ChainID: "tchain", InitialHeight: 1, StartTime: time.Now().Add(-time.Second), ProposerAddress: addr} - r := NewDARetriever(nil, cm, config.DefaultConfig(), gen, zerolog.Nop()) + r := newTestDARetriever(t, nil, config.DefaultConfig(), gen) // Create multiple headers and data for different block heights data3Bin, data3 := makeSignedDataBytes(t, gen.ChainID, 3, addr, pub, signer, 1) diff --git a/block/internal/syncing/syncer_backoff_test.go b/block/internal/syncing/syncer_backoff_test.go index d302a857a0..47dab7325e 100644 --- a/block/internal/syncing/syncer_backoff_test.go +++ b/block/internal/syncing/syncer_backoff_test.go @@ -15,8 +15,8 @@ import ( "github.com/evstack/ev-node/block/internal/cache" "github.com/evstack/ev-node/block/internal/common" - "github.com/evstack/ev-node/core/execution" da "github.com/evstack/ev-node/da" + "github.com/evstack/ev-node/core/execution" "github.com/evstack/ev-node/pkg/config" "github.com/evstack/ev-node/pkg/genesis" "github.com/evstack/ev-node/pkg/store" @@ -326,7 +326,7 @@ func setupTestSyncer(t *testing.T, daBlockTime time.Duration) *Syncer { ds := dssync.MutexWrap(datastore.NewMapDatastore()) st := store.New(ds) - cm, err := cache.NewManager(config.DefaultConfig(), st, zerolog.Nop()) + cm, err := cache.NewCacheManager(config.DefaultConfig(), zerolog.Nop()) require.NoError(t, err) addr, _, _ := buildSyncTestSigner(t) diff --git a/block/internal/syncing/syncer_benchmark_test.go b/block/internal/syncing/syncer_benchmark_test.go index bb714d4faa..e2b6f6e51f 100644 --- a/block/internal/syncing/syncer_benchmark_test.go +++ b/block/internal/syncing/syncer_benchmark_test.go @@ -58,7 +58,7 @@ func BenchmarkSyncerIO(b *testing.B) { } require.Len(b, fixt.s.heightInCh, 0) - assert.Equal(b, spec.heights+daHeightOffset, fixt.s.daRetrieverHeight.Load()) + assert.Equal(b, spec.heights+daHeightOffset, fixt.s.daRetrieverHeight) gotStoreHeight, err := fixt.s.store.Height(b.Context()) require.NoError(b, err) assert.Equal(b, spec.heights, gotStoreHeight) @@ -70,7 +70,7 @@ func BenchmarkSyncerIO(b *testing.B) { type benchFixture struct { s *Syncer st store.Store - cm cache.Manager + cm cache.CacheManager cancel context.CancelFunc } @@ -81,7 +81,7 @@ func newBenchFixture(b *testing.B, totalHeights uint64, shuffledTx bool, daDelay ds := dssync.MutexWrap(datastore.NewMapDatastore()) st := store.New(ds) - cm, err := cache.NewManager(config.DefaultConfig(), st, zerolog.Nop()) + cm, err := cache.NewCacheManager(config.DefaultConfig(), zerolog.Nop()) require.NoError(b, err) addr, pub, signer := buildSyncTestSigner(b) diff --git a/block/internal/syncing/syncer_test.go b/block/internal/syncing/syncer_test.go index bc256c2f7f..9cfce76adf 100644 --- a/block/internal/syncing/syncer_test.go +++ b/block/internal/syncing/syncer_test.go @@ -4,13 +4,14 @@ import ( "context" crand "crypto/rand" "crypto/sha512" + "encoding/binary" "errors" "sync/atomic" "testing" "time" - "github.com/evstack/ev-node/core/execution" da "github.com/evstack/ev-node/da" + "github.com/evstack/ev-node/core/execution" "github.com/evstack/ev-node/pkg/genesis" signerpkg "github.com/evstack/ev-node/pkg/signer" "github.com/evstack/ev-node/pkg/signer/noop" @@ -104,7 +105,7 @@ func TestSyncer_validateBlock_DataHashMismatch(t *testing.T) { ds := dssync.MutexWrap(datastore.NewMapDatastore()) st := store.New(ds) - cm, err := cache.NewManager(config.DefaultConfig(), st, zerolog.Nop()) + cm, err := cache.NewCacheManager(config.DefaultConfig(), zerolog.Nop()) require.NoError(t, err) addr, pub, signer := buildSyncTestSigner(t) @@ -153,7 +154,7 @@ func TestProcessHeightEvent_SyncsAndUpdatesState(t *testing.T) { ds := dssync.MutexWrap(datastore.NewMapDatastore()) st := store.New(ds) - cm, err := cache.NewManager(config.DefaultConfig(), st, zerolog.Nop()) + cm, err := cache.NewCacheManager(config.DefaultConfig(), zerolog.Nop()) require.NoError(t, err) addr, pub, signer := buildSyncTestSigner(t) @@ -208,7 +209,7 @@ func TestSequentialBlockSync(t *testing.T) { ds := dssync.MutexWrap(datastore.NewMapDatastore()) st := store.New(ds) - cm, err := cache.NewManager(config.DefaultConfig(), st, zerolog.Nop()) + cm, err := cache.NewCacheManager(config.DefaultConfig(), zerolog.Nop()) require.NoError(t, err) addr, pub, signer := buildSyncTestSigner(t) @@ -281,7 +282,7 @@ func TestSyncer_processPendingEvents(t *testing.T) { ds := dssync.MutexWrap(datastore.NewMapDatastore()) st := store.New(ds) - cm, err := cache.NewManager(config.DefaultConfig(), st, zerolog.Nop()) + cm, err := cache.NewCacheManager(config.DefaultConfig(), zerolog.Nop()) require.NoError(t, err) // current height 1 @@ -327,7 +328,7 @@ func TestSyncLoopPersistState(t *testing.T) { cfg.RootDir = t.TempDir() cfg.ClearCache = true - cacheMgr, err := cache.NewManager(cfg, st, zerolog.Nop()) + cacheMgr, err := cache.NewCacheManager(cfg, zerolog.Nop()) require.NoError(t, err) const myDAHeightOffset = uint64(1) @@ -437,7 +438,7 @@ func TestSyncLoopPersistState(t *testing.T) { require.Nil(t, event, "event at height %d should have been removed", blockHeight) } // and when new instance is up on restart - cacheMgr, err = cache.NewManager(cfg, st, zerolog.Nop()) + cacheMgr, err = cache.NewCacheManager(cfg, zerolog.Nop()) require.NoError(t, err) require.NoError(t, cacheMgr.LoadFromDisk()) @@ -574,9 +575,7 @@ func TestSyncer_InitializeState_CallsReplayer(t *testing.T) { // This test verifies that initializeState() invokes Replayer. // The detailed replay logic is tested in block/internal/common/replay_test.go - ds := dssync.MutexWrap(datastore.NewMapDatastore()) - st := store.New(ds) - cm, err := cache.NewManager(config.DefaultConfig(), st, zerolog.Nop()) + cm, err := cache.NewCacheManager(config.DefaultConfig(), zerolog.Nop()) require.NoError(t, err) // Create mocks @@ -606,21 +605,21 @@ func TestSyncer_InitializeState_CallsReplayer(t *testing.T) { ) // Mock GetMetadata calls for DA included height retrieval - mockStore.EXPECT().GetMetadata(mock.Anything, mock.Anything).Return(nil, datastore.ErrNotFound).Maybe() + mockStore.EXPECT().GetMetadata(mock.Anything, store.DAIncludedHeightKey).Return(nil, datastore.ErrNotFound) // Setup execution layer to be in sync mockExec.On("GetLatestHeight", mock.Anything).Return(storeHeight, nil) // Create syncer with minimal dependencies syncer := &Syncer{ - store: mockStore, - exec: mockExec, - genesis: gen, - lastState: &atomic.Pointer[types.State]{}, + store: mockStore, + exec: mockExec, + genesis: gen, + lastState: &atomic.Pointer[types.State]{}, daRetrieverHeight: &atomic.Uint64{}, - logger: zerolog.Nop(), - ctx: context.Background(), - cache: cm, + logger: zerolog.Nop(), + ctx: context.Background(), + cache: cm, } // Initialize state - this should call Replayer @@ -644,3 +643,58 @@ func requireEmptyChan(t *testing.T, errorCh chan error) { default: } } + +func TestSyncer_getHighestStoredDAHeight(t *testing.T) { + ds := dssync.MutexWrap(datastore.NewMapDatastore()) + st := store.New(ds) + ctx := context.Background() + + syncer := &Syncer{ + store: st, + ctx: ctx, + logger: zerolog.Nop(), + } + + // Test case 1: No DA included height set + highestDA := syncer.getHighestStoredDAHeight() + assert.Equal(t, uint64(0), highestDA) + + // Test case 2: DA included height set, but no mappings + bz := make([]byte, 8) + binary.LittleEndian.PutUint64(bz, 1) + require.NoError(t, st.SetMetadata(ctx, store.DAIncludedHeightKey, bz)) + + highestDA = syncer.getHighestStoredDAHeight() + assert.Equal(t, uint64(0), highestDA) + + // Test case 3: DA included height with header mapping + headerBytes := make([]byte, 8) + binary.LittleEndian.PutUint64(headerBytes, 100) + require.NoError(t, st.SetMetadata(ctx, store.GetHeightToDAHeightHeaderKey(1), headerBytes)) + + highestDA = syncer.getHighestStoredDAHeight() + assert.Equal(t, uint64(100), highestDA) + + // Test case 4: DA included height with both header and data mappings (data is higher) + dataBytes := make([]byte, 8) + binary.LittleEndian.PutUint64(dataBytes, 105) + require.NoError(t, st.SetMetadata(ctx, store.GetHeightToDAHeightDataKey(1), dataBytes)) + + highestDA = syncer.getHighestStoredDAHeight() + assert.Equal(t, uint64(105), highestDA) + + // Test case 5: Advance to height 2 with higher DA heights + binary.LittleEndian.PutUint64(bz, 2) + require.NoError(t, st.SetMetadata(ctx, store.DAIncludedHeightKey, bz)) + + headerBytes2 := make([]byte, 8) + binary.LittleEndian.PutUint64(headerBytes2, 200) + require.NoError(t, st.SetMetadata(ctx, store.GetHeightToDAHeightHeaderKey(2), headerBytes2)) + + dataBytes2 := make([]byte, 8) + binary.LittleEndian.PutUint64(dataBytes2, 195) + require.NoError(t, st.SetMetadata(ctx, store.GetHeightToDAHeightDataKey(2), dataBytes2)) + + highestDA = syncer.getHighestStoredDAHeight() + assert.Equal(t, uint64(200), highestDA, "should return highest DA height from most recent included height") +} From a9376b031b47b2d02c3b50b3652267abd0c8f8c4 Mon Sep 17 00:00:00 2001 From: Randy Grok Date: Thu, 27 Nov 2025 16:17:47 +0100 Subject: [PATCH 32/35] remove da interface --- da/cmd/local-da/server.go | 68 +-------------------------------------- 1 file changed, 1 insertion(+), 67 deletions(-) diff --git a/da/cmd/local-da/server.go b/da/cmd/local-da/server.go index 7330c2183b..18037c17aa 100644 --- a/da/cmd/local-da/server.go +++ b/da/cmd/local-da/server.go @@ -43,68 +43,11 @@ type Server struct { srv *http.Server rpc *jsonrpc.RPCServer listener net.Listener - daImpl da.DA - localDA *LocalDA // For blob API access to internal data + localDA *LocalDA started atomic.Bool } -// serverInternalAPI provides the actual RPC methods. -type serverInternalAPI struct { - logger zerolog.Logger - daImpl da.DA -} - -// Get implements the RPC method. -func (s *serverInternalAPI) Get(ctx context.Context, ids []da.ID, ns []byte) ([]da.Blob, error) { - s.logger.Debug().Int("num_ids", len(ids)).Str("namespace", string(ns)).Msg("RPC server: Get called") - return s.daImpl.Get(ctx, ids, ns) -} - -// GetIDs implements the RPC method. -func (s *serverInternalAPI) GetIDs(ctx context.Context, height uint64, ns []byte) (*da.GetIDsResult, error) { - s.logger.Debug().Uint64("height", height).Str("namespace", string(ns)).Msg("RPC server: GetIDs called") - return s.daImpl.GetIDs(ctx, height, ns) -} - -// GetProofs implements the RPC method. -func (s *serverInternalAPI) GetProofs(ctx context.Context, ids []da.ID, ns []byte) ([]da.Proof, error) { - s.logger.Debug().Int("num_ids", len(ids)).Str("namespace", string(ns)).Msg("RPC server: GetProofs called") - return s.daImpl.GetProofs(ctx, ids, ns) -} - -// Commit implements the RPC method. -func (s *serverInternalAPI) Commit(ctx context.Context, blobs []da.Blob, ns []byte) ([]da.Commitment, error) { - s.logger.Debug().Int("num_blobs", len(blobs)).Str("namespace", string(ns)).Msg("RPC server: Commit called") - return s.daImpl.Commit(ctx, blobs, ns) -} - -// Validate implements the RPC method. -func (s *serverInternalAPI) Validate(ctx context.Context, ids []da.ID, proofs []da.Proof, ns []byte) ([]bool, error) { - s.logger.Debug().Int("num_ids", len(ids)).Int("num_proofs", len(proofs)).Str("namespace", string(ns)).Msg("RPC server: Validate called") - return s.daImpl.Validate(ctx, ids, proofs, ns) -} - -// Submit implements the RPC method. -func (s *serverInternalAPI) Submit(ctx context.Context, blobs []da.Blob, gasPrice float64, ns []byte) ([]da.ID, error) { - s.logger.Debug().Int("num_blobs", len(blobs)).Float64("gas_price", gasPrice).Str("namespace", string(ns)).Msg("RPC server: Submit called") - result := s.daImpl.Submit(ctx, blobs, gasPrice, ns) - if result.Code != da.StatusSuccess { - return result.IDs, da.StatusCodeToError(result.Code, result.Message) - } - return result.IDs, nil -} - -// SubmitWithOptions implements the RPC method. -func (s *serverInternalAPI) SubmitWithOptions(ctx context.Context, blobs []da.Blob, gasPrice float64, ns []byte, options []byte) ([]da.ID, error) { - s.logger.Debug().Int("num_blobs", len(blobs)).Float64("gas_price", gasPrice).Str("namespace", string(ns)).Str("options", string(options)).Msg("RPC server: SubmitWithOptions called") - result := s.daImpl.SubmitWithOptions(ctx, blobs, gasPrice, ns, options) - if result.Code != da.StatusSuccess { - return result.IDs, da.StatusCodeToError(result.Code, result.Message) - } - return result.IDs, nil -} - // blobAPI provides Celestia-compatible Blob API methods type blobAPI struct { logger zerolog.Logger @@ -293,13 +236,11 @@ func getKnownErrorsMapping() jsonrpc.Errors { } // NewServer creates a new JSON-RPC server for the LocalDA implementation -// It registers both the legacy "da" namespace and the Celestia-compatible "blob" namespace func NewServer(logger zerolog.Logger, address, port string, localDA *LocalDA) *Server { rpc := jsonrpc.NewServer(jsonrpc.WithServerErrors(getKnownErrorsMapping())) srv := &Server{ rpc: rpc, logger: logger, - daImpl: localDA, localDA: localDA, srv: &http.Server{ Addr: address + ":" + port, @@ -308,13 +249,6 @@ func NewServer(logger zerolog.Logger, address, port string, localDA *LocalDA) *S } srv.srv.Handler = http.HandlerFunc(rpc.ServeHTTP) - // Register legacy "da" namespace API - daAPIHandler := &serverInternalAPI{ - logger: logger, - daImpl: localDA, - } - srv.rpc.Register("da", daAPIHandler) - // Register Celestia-compatible "blob" namespace API blobAPIHandler := &blobAPI{ logger: logger, From eb55ca8c9777d20b84e320a5eb48dcf3704d9798 Mon Sep 17 00:00:00 2001 From: Randy Grok Date: Thu, 27 Nov 2025 16:36:15 +0100 Subject: [PATCH 33/35] clean errors --- da/celestia/client.go | 61 +++---------------------------------------- 1 file changed, 4 insertions(+), 57 deletions(-) diff --git a/da/celestia/client.go b/da/celestia/client.go index f01b5b51fe..e9fca3b21f 100644 --- a/da/celestia/client.go +++ b/da/celestia/client.go @@ -4,10 +4,8 @@ import ( "context" "encoding/binary" "encoding/json" - "errors" "fmt" "net/http" - "strings" "time" "github.com/filecoin-project/go-jsonrpc" @@ -245,9 +243,6 @@ func (c *Client) Get(ctx context.Context, ids []da.ID, namespace []byte) ([]da.B for height := range heightGroups { blobs, err := c.getAll(ctx, height, []Namespace{namespace}) if err != nil { - if strings.Contains(err.Error(), "not found") { - return nil, da.ErrBlobNotFound - } return nil, fmt.Errorf("failed to get blobs at height %d: %w", height, err) } @@ -409,34 +404,11 @@ func (c *Client) SubmitWithOptions(ctx context.Context, blobs []da.Blob, gasPric height, err := c.submit(ctx, celestiaBlobs, opts) if err != nil { - status := da.StatusError - errStr := err.Error() - - switch { - case errors.Is(err, context.Canceled): - status = da.StatusContextCanceled - case errors.Is(err, context.DeadlineExceeded): - status = da.StatusContextDeadline - case strings.Contains(errStr, "timeout"): - status = da.StatusNotIncludedInBlock - case strings.Contains(errStr, "too large") || strings.Contains(errStr, "exceeds"): - status = da.StatusTooBig - case strings.Contains(errStr, "already in mempool"): - status = da.StatusAlreadyInMempool - case strings.Contains(errStr, "incorrect account sequence"): - status = da.StatusIncorrectAccountSequence - } - - if status == da.StatusTooBig { - c.logger.Debug().Err(err).Uint64("status", uint64(status)).Msg("DA submission failed") - } else { - c.logger.Error().Err(err).Uint64("status", uint64(status)).Msg("DA submission failed") - } - + c.logger.Error().Err(err).Msg("DA submission failed") return da.ResultSubmit{ BaseResult: da.BaseResult{ - Code: status, - Message: "failed to submit blobs: " + err.Error(), + Code: da.StatusError, + Message: err.Error(), BlobSize: blobSize, Timestamp: time.Now(), }, @@ -467,36 +439,11 @@ func (c *Client) Retrieve(ctx context.Context, height uint64, namespace []byte) blobs, err := c.getAll(getCtx, height, []Namespace{namespace}) if err != nil { - errStr := err.Error() - - if strings.Contains(errStr, "not found") { - c.logger.Debug().Uint64("height", height).Msg("Blobs not found at height") - return da.ResultRetrieve{ - BaseResult: da.BaseResult{ - Code: da.StatusNotFound, - Message: da.ErrBlobNotFound.Error(), - Height: height, - Timestamp: time.Now(), - }, - } - } - if strings.Contains(errStr, "height") && strings.Contains(errStr, "future") { - c.logger.Debug().Uint64("height", height).Msg("Height is from the future") - return da.ResultRetrieve{ - BaseResult: da.BaseResult{ - Code: da.StatusHeightFromFuture, - Message: da.ErrHeightFromFuture.Error(), - Height: height, - Timestamp: time.Now(), - }, - } - } - c.logger.Error().Uint64("height", height).Err(err).Msg("Failed to retrieve blobs") return da.ResultRetrieve{ BaseResult: da.BaseResult{ Code: da.StatusError, - Message: fmt.Sprintf("failed to retrieve blobs: %s", err.Error()), + Message: err.Error(), Height: height, Timestamp: time.Now(), }, From 59e16ff3009ccfca567947708107bc31e38ae6be Mon Sep 17 00:00:00 2001 From: Randy Grok Date: Thu, 27 Nov 2025 17:35:15 +0100 Subject: [PATCH 34/35] use same proof as in json client --- da/celestia/client.go | 29 +++++++++++++++-------- da/celestia/client_test.go | 6 +++-- da/celestia/types.go | 47 +++++++++++++++++++++++++++----------- da/celestia/types_test.go | 21 ++++++++++------- 4 files changed, 71 insertions(+), 32 deletions(-) diff --git a/da/celestia/client.go b/da/celestia/client.go index e9fca3b21f..32a1eebacf 100644 --- a/da/celestia/client.go +++ b/da/celestia/client.go @@ -178,9 +178,13 @@ func (c *Client) getProof(ctx context.Context, height uint64, namespace Namespac return nil, fmt.Errorf("failed to get proof: %w", err) } + proofSegments := 0 + if proof != nil { + proofSegments = len(*proof) + } c.logger.Debug(). Uint64("height", height). - Int("proof_size", len(proof.Data)). + Int("proof_segments", proofSegments). Msg("Successfully retrieved proof") return proof, nil @@ -287,7 +291,11 @@ func (c *Client) GetProofs(ctx context.Context, ids []da.ID, namespace []byte) ( return nil, fmt.Errorf("failed to get proof for ID %d: %w", i, err) } - proofs[i] = proof.Data + encodedProof, err := json.Marshal(proof) + if err != nil { + return nil, fmt.Errorf("failed to marshal proof for ID %d: %w", i, err) + } + proofs[i] = encodedProof } return proofs, nil @@ -320,8 +328,12 @@ func (c *Client) Validate(ctx context.Context, ids []da.ID, proofs []da.Proof, n return nil, fmt.Errorf("invalid ID at index %d: %w", i, err) } - proof := &Proof{Data: proofs[i]} - included, err := c.included(ctx, height, namespace, proof, commitment) + var proof Proof + if err := json.Unmarshal(proofs[i], &proof); err != nil { + return nil, fmt.Errorf("failed to decode proof %d: %w", i, err) + } + + included, err := c.included(ctx, height, namespace, &proof, commitment) if err != nil { return nil, fmt.Errorf("failed to validate proof %d: %w", i, err) } @@ -380,14 +392,14 @@ func (c *Client) SubmitWithOptions(ctx context.Context, blobs []da.Blob, gasPric } celestiaBlobs[i] = &Blob{ Namespace: namespace, + ShareVer: 0, Data: blob, Commitment: commitment, } } - var opts *SubmitOptions + opts := &SubmitOptions{} if len(options) > 0 { - opts = &SubmitOptions{} if err := json.Unmarshal(options, opts); err != nil { return da.ResultSubmit{ BaseResult: da.BaseResult{ @@ -397,10 +409,9 @@ func (c *Client) SubmitWithOptions(ctx context.Context, blobs []da.Blob, gasPric }, } } - opts.Fee = gasPrice - } else { - opts = &SubmitOptions{Fee: gasPrice} } + opts.GasPrice = gasPrice + opts.IsGasPriceSet = true height, err := c.submit(ctx, celestiaBlobs, opts) if err != nil { diff --git a/da/celestia/client_test.go b/da/celestia/client_test.go index 9d57d5cc42..4a3a56db26 100644 --- a/da/celestia/client_test.go +++ b/da/celestia/client_test.go @@ -7,6 +7,8 @@ import ( "github.com/rs/zerolog" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" + + "github.com/celestiaorg/nmt" ) func TestNewClient(t *testing.T) { @@ -215,9 +217,9 @@ func TestClient_Included(t *testing.T) { validNamespace := make([]byte, 29) validCommitment := []byte("commitment") - proof := &Proof{Data: []byte("proof")} + proof := Proof{&nmt.Proof{}} - _, err = client.included(ctx, 100, validNamespace, proof, validCommitment) + _, err = client.included(ctx, 100, validNamespace, &proof, validCommitment) require.Error(t, err) assert.Contains(t, err.Error(), "failed to check inclusion") } diff --git a/da/celestia/types.go b/da/celestia/types.go index 496a151474..ac414f5a98 100644 --- a/da/celestia/types.go +++ b/da/celestia/types.go @@ -3,6 +3,10 @@ package celestia import ( "encoding/json" "fmt" + + "github.com/celestiaorg/nmt" + + "github.com/evstack/ev-node/da" ) // Namespace represents a Celestia namespace (29 bytes: 1 version + 28 ID) @@ -15,38 +19,55 @@ type Commitment []byte type Blob struct { Namespace Namespace `json:"namespace"` Data []byte `json:"data"` - ShareVer uint32 `json:"share_version"` + ShareVer uint8 `json:"share_version"` Commitment Commitment `json:"commitment"` + Signer []byte `json:"signer,omitempty"` Index int `json:"index"` } // Proof represents a Celestia inclusion proof -type Proof struct { - Data []byte `json:"data"` -} +type Proof []*nmt.Proof // SubmitOptions contains options for blob submission type SubmitOptions struct { - Fee float64 `json:"fee,omitempty"` - GasLimit uint64 `json:"gas_limit,omitempty"` - SignerAddress string `json:"signer_address,omitempty"` + GasPrice float64 `json:"gas_price,omitempty"` + IsGasPriceSet bool `json:"is_gas_price_set,omitempty"` + MaxGasPrice float64 `json:"max_gas_price,omitempty"` + Gas uint64 `json:"gas,omitempty"` + TxPriority int `json:"tx_priority,omitempty"` + KeyName string `json:"key_name,omitempty"` + SignerAddress string `json:"signer_address,omitempty"` + FeeGranterAddress string `json:"fee_granter_address,omitempty"` } // MarshalJSON implements json.Marshaler for Proof -func (p *Proof) MarshalJSON() ([]byte, error) { - return json.Marshal(p.Data) +func (p Proof) MarshalJSON() ([]byte, error) { + return json.Marshal([]*nmt.Proof(p)) } // UnmarshalJSON implements json.Unmarshaler for Proof func (p *Proof) UnmarshalJSON(data []byte) error { - return json.Unmarshal(data, &p.Data) + var proofs []*nmt.Proof + if err := json.Unmarshal(data, &proofs); err != nil { + return err + } + *p = proofs + return nil } // ValidateNamespace validates that a namespace is properly formatted (29 bytes). func ValidateNamespace(ns Namespace) error { - const NamespaceSize = 29 - if len(ns) != NamespaceSize { - return fmt.Errorf("invalid namespace size: got %d, expected %d", len(ns), NamespaceSize) + if len(ns) != da.NamespaceSize { + return fmt.Errorf("invalid namespace size: got %d, expected %d", len(ns), da.NamespaceSize) + } + + parsed, err := da.NamespaceFromBytes(ns) + if err != nil { + return fmt.Errorf("invalid namespace: %w", err) + } + + if parsed.Version != da.NamespaceVersionZero || !parsed.IsValidForVersion0() { + return fmt.Errorf("invalid namespace: only version 0 namespaces with first %d zero bytes are supported", da.NamespaceVersionZeroPrefixSize) } return nil } diff --git a/da/celestia/types_test.go b/da/celestia/types_test.go index a4587587c0..0d96069c5d 100644 --- a/da/celestia/types_test.go +++ b/da/celestia/types_test.go @@ -4,6 +4,7 @@ import ( "encoding/json" "testing" + "github.com/celestiaorg/nmt" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" ) @@ -49,8 +50,8 @@ func TestValidateNamespace(t *testing.T) { } func TestProofJSONMarshaling(t *testing.T) { - proof := &Proof{ - Data: []byte{1, 2, 3, 4, 5}, + proof := Proof{ + &nmt.Proof{}, } // Marshal @@ -62,14 +63,16 @@ func TestProofJSONMarshaling(t *testing.T) { err = json.Unmarshal(data, &decoded) require.NoError(t, err) - assert.Equal(t, proof.Data, decoded.Data) + assert.Equal(t, len(proof), len(decoded)) } func TestSubmitOptionsJSON(t *testing.T) { opts := &SubmitOptions{ - Fee: 0.002, - GasLimit: 100000, - SignerAddress: "celestia1abc123", + GasPrice: 0.002, + IsGasPriceSet: true, + Gas: 100000, + SignerAddress: "celestia1abc123", + FeeGranterAddress: "celestia1feegranter", } // Marshal @@ -81,7 +84,9 @@ func TestSubmitOptionsJSON(t *testing.T) { err = json.Unmarshal(data, &decoded) require.NoError(t, err) - assert.Equal(t, opts.Fee, decoded.Fee) - assert.Equal(t, opts.GasLimit, decoded.GasLimit) + assert.Equal(t, opts.GasPrice, decoded.GasPrice) + assert.Equal(t, opts.IsGasPriceSet, decoded.IsGasPriceSet) + assert.Equal(t, opts.Gas, decoded.Gas) assert.Equal(t, opts.SignerAddress, decoded.SignerAddress) + assert.Equal(t, opts.FeeGranterAddress, decoded.FeeGranterAddress) } From a6639a68670cc826b62bfdd0d4efe9269f512426 Mon Sep 17 00:00:00 2001 From: Randy Grok Date: Thu, 27 Nov 2025 21:46:36 +0100 Subject: [PATCH 35/35] add client --- da/celestia/client.go | 166 ++++++++++++++++++++++++++++++++++++------ 1 file changed, 145 insertions(+), 21 deletions(-) diff --git a/da/celestia/client.go b/da/celestia/client.go index 32a1eebacf..6ab10475b8 100644 --- a/da/celestia/client.go +++ b/da/celestia/client.go @@ -4,8 +4,10 @@ import ( "context" "encoding/binary" "encoding/json" + "errors" "fmt" "net/http" + "strings" "time" "github.com/filecoin-project/go-jsonrpc" @@ -17,9 +19,6 @@ import ( // defaultRetrieveTimeout is the default timeout for DA retrieval operations const defaultRetrieveTimeout = 10 * time.Second -// retrieveBatchSize is the number of blobs to retrieve in a single batch -const retrieveBatchSize = 100 - // Client connects to celestia-node's blob API via JSON-RPC and implements the da.DA interface. type Client struct { logger zerolog.Logger @@ -378,6 +377,28 @@ func (c *Client) SubmitWithOptions(ctx context.Context, blobs []da.Blob, gasPric } } + // Enforce max blob size locally so callers can handle StatusTooBig (used by submitter to split batches) + for i, blob := range blobs { + if uint64(len(blob)) > c.maxBlobSize { + return da.ResultSubmit{ + BaseResult: da.BaseResult{ + Code: da.StatusTooBig, + Message: fmt.Sprintf("blob %d exceeds max blob size (%d > %d)", i, len(blob), c.maxBlobSize), + BlobSize: blobSize, + }, + } + } + } + if blobSize > c.maxBlobSize { + return da.ResultSubmit{ + BaseResult: da.BaseResult{ + Code: da.StatusTooBig, + Message: fmt.Sprintf("total blob size exceeds max blob size (%d > %d)", blobSize, c.maxBlobSize), + BlobSize: blobSize, + }, + } + } + celestiaBlobs := make([]*Blob, len(blobs)) for i, blob := range blobs { commitment, err := CreateCommitment(blob, namespace) @@ -415,15 +436,7 @@ func (c *Client) SubmitWithOptions(ctx context.Context, blobs []da.Blob, gasPric height, err := c.submit(ctx, celestiaBlobs, opts) if err != nil { - c.logger.Error().Err(err).Msg("DA submission failed") - return da.ResultSubmit{ - BaseResult: da.BaseResult{ - Code: da.StatusError, - Message: err.Error(), - BlobSize: blobSize, - Timestamp: time.Now(), - }, - } + return c.handleSubmitError(err, blobSize) } ids := make([]da.ID, len(celestiaBlobs)) @@ -450,15 +463,7 @@ func (c *Client) Retrieve(ctx context.Context, height uint64, namespace []byte) blobs, err := c.getAll(getCtx, height, []Namespace{namespace}) if err != nil { - c.logger.Error().Uint64("height", height).Err(err).Msg("Failed to retrieve blobs") - return da.ResultRetrieve{ - BaseResult: da.BaseResult{ - Code: da.StatusError, - Message: err.Error(), - Height: height, - Timestamp: time.Now(), - }, - } + return c.handleRetrieveError(err, height) } if len(blobs) == 0 { @@ -491,3 +496,122 @@ func (c *Client) Retrieve(ctx context.Context, height uint64, namespace []byte) Data: data, } } + +// handleSubmitError maps errors from the blob API to DA status codes and returns a ResultSubmit. +func (c *Client) handleSubmitError(err error, blobSize uint64) da.ResultSubmit { + status := da.StatusError + message := err.Error() + + var rpcErr *jsonrpc.JSONRPCError + if errors.As(err, &rpcErr) { + switch rpcErr.Code { + case jsonrpc.ErrorCode(da.StatusNotIncludedInBlock): + status = da.StatusNotIncludedInBlock + case jsonrpc.ErrorCode(da.StatusAlreadyInMempool): + status = da.StatusAlreadyInMempool + case jsonrpc.ErrorCode(da.StatusTooBig): + status = da.StatusTooBig + case jsonrpc.ErrorCode(da.StatusIncorrectAccountSequence): + status = da.StatusIncorrectAccountSequence + case jsonrpc.ErrorCode(da.StatusContextDeadline): + status = da.StatusContextDeadline + case jsonrpc.ErrorCode(da.StatusContextCanceled): + status = da.StatusContextCanceled + } + if rpcErr.Message != "" { + message = rpcErr.Message + } + } + + if status == da.StatusError { + errStr := err.Error() + switch { + case errors.Is(err, context.Canceled): + status = da.StatusContextCanceled + case errors.Is(err, context.DeadlineExceeded): + status = da.StatusContextDeadline + case strings.Contains(errStr, "timeout"): + status = da.StatusNotIncludedInBlock + case strings.Contains(errStr, "blob(s) too large"), + strings.Contains(errStr, "total blob size too large"), + strings.Contains(errStr, "too large"), + strings.Contains(errStr, "exceeds"): + status = da.StatusTooBig + case strings.Contains(errStr, "already in mempool"), + strings.Contains(errStr, "tx already exists in cache"): + status = da.StatusAlreadyInMempool + case strings.Contains(errStr, "incorrect account sequence"), + strings.Contains(errStr, "account sequence mismatch"): + status = da.StatusIncorrectAccountSequence + } + } + + if status == da.StatusTooBig { + c.logger.Debug().Err(err).Uint64("status", uint64(status)).Msg("DA submission failed") + } else { + c.logger.Error().Err(err).Uint64("status", uint64(status)).Msg("DA submission failed") + } + + return da.ResultSubmit{ + BaseResult: da.BaseResult{ + Code: status, + Message: message, + BlobSize: blobSize, + Timestamp: time.Now(), + }, + } +} + +// handleRetrieveError maps blob API errors to DA status codes and returns a ResultRetrieve. +func (c *Client) handleRetrieveError(err error, height uint64) da.ResultRetrieve { + status := da.StatusError + message := err.Error() + + var rpcErr *jsonrpc.JSONRPCError + if errors.As(err, &rpcErr) { + switch rpcErr.Code { + case jsonrpc.ErrorCode(da.StatusNotFound): + status = da.StatusNotFound + case jsonrpc.ErrorCode(da.StatusHeightFromFuture): + status = da.StatusHeightFromFuture + case jsonrpc.ErrorCode(da.StatusContextDeadline): + status = da.StatusContextDeadline + case jsonrpc.ErrorCode(da.StatusContextCanceled): + status = da.StatusContextCanceled + } + if rpcErr.Message != "" { + message = rpcErr.Message + } + } + + if status == da.StatusError { + errStr := err.Error() + switch { + case strings.Contains(errStr, "not found"): + status = da.StatusNotFound + message = da.ErrBlobNotFound.Error() + case strings.Contains(errStr, "height") && strings.Contains(errStr, "future"): + status = da.StatusHeightFromFuture + message = da.ErrHeightFromFuture.Error() + case errors.Is(err, context.Canceled): + status = da.StatusContextCanceled + case errors.Is(err, context.DeadlineExceeded): + status = da.StatusContextDeadline + } + } + + if status == da.StatusNotFound || status == da.StatusHeightFromFuture { + c.logger.Debug().Uint64("height", height).Str("status", fmt.Sprintf("%d", status)).Msg("Retrieve returned non-success status") + } else { + c.logger.Error().Uint64("height", height).Err(err).Uint64("status", uint64(status)).Msg("Failed to retrieve blobs") + } + + return da.ResultRetrieve{ + BaseResult: da.BaseResult{ + Code: status, + Message: message, + Height: height, + Timestamp: time.Now(), + }, + } +}