diff --git a/types/header.go b/types/header.go
index 04c3f8eb24..20dd4b1aa5 100644
--- a/types/header.go
+++ b/types/header.go
@@ -68,6 +68,11 @@ type Header struct {
 	// We keep this in case users choose another signature format where the
 	// pubkey can't be recovered by the signature (e.g. ed25519).
 	ProposerAddress []byte // original proposer of the block
+
+	// Legacy holds fields that were removed from the canonical header JSON/Go
+	// representation but may still be required for backwards compatible binary
+	// serialization (e.g. legacy signing payloads).
+	Legacy *LegacyHeaderFields
 }
 
 // New creates a new Header.
@@ -133,3 +138,115 @@ var (
 	_ encoding.BinaryMarshaler   = &Header{}
 	_ encoding.BinaryUnmarshaler = &Header{}
 )
+
+// LegacyHeaderFields captures the deprecated header fields that existed prior
+// to the header minimization change. When populated, these values are reused
+// while constructing the protobuf payload so that legacy nodes can continue to
+// verify signatures and hashes.
+//
+// # Migration Guide
+//
+// This compatibility layer enables networks to sync from genesis after the header
+// minimization changes. The system automatically handles both legacy and slim
+// header formats through a multi-format verification fallback mechanism.
+//
+// ## Format Detection
+//
+// Headers are decoded and verified using the following strategy:
+//  1. Try custom signature provider (if configured)
+//  2. Try slim header format (new format without legacy fields)
+//  3. Try legacy header format (includes LastCommitHash, ConsensusHash, LastResultsHash)
+//
+// The Legacy field is automatically populated during deserialization when legacy
+// fields are detected in the protobuf unknown fields (field numbers 5, 7, 9).
+//
+// ## For Block Producers
+//
+// New blocks should use the slim header format by default (Legacy == nil).
+// The legacy encoding is only required when:
+//   - Syncing historical blocks from genesis
+//   - Interoperating with legacy nodes
+//   - Verifying signatures on historical blocks
+//
+// ## For Node Operators
+//
+// Nodes will automatically:
+//   - Decode legacy headers when syncing from genesis
+//   - Verify signatures using the appropriate format
+//   - Handle mixed networks with both old and new nodes
+//
+// No configuration changes are required for the migration.
+//
+// ## Legacy Field Defaults
+//
+// When encoding legacy headers, ConsensusHash defaults to a 32-byte zero hash
+// if not explicitly set, matching the historical behavior. Other legacy fields
+// remain nil if unset.
+type LegacyHeaderFields struct {
+	LastCommitHash  Hash
+	ConsensusHash   Hash
+	LastResultsHash Hash
+}
+
+// IsZero reports whether all legacy fields are unset.
+func (l *LegacyHeaderFields) IsZero() bool {
+	if l == nil {
+		return true
+	}
+	return len(l.LastCommitHash) == 0 &&
+		len(l.ConsensusHash) == 0 &&
+		len(l.LastResultsHash) == 0
+}
+
+// EnsureDefaults initialises missing legacy fields with their historical
+// default values so that the legacy protobuf payload matches the pre-change
+// encoding.
+func (l *LegacyHeaderFields) EnsureDefaults() {
+	if l.ConsensusHash == nil {
+		l.ConsensusHash = make(Hash, 32)
+	}
+}
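+
+// Producing both encodings of the same header, for illustration only
+// (h is a hypothetical, already populated Header value):
+//
+//	h.ApplyLegacyDefaults()                  // Legacy != nil; ConsensusHash defaults to 32 zero bytes
+//	slimBz, err := h.MarshalBinary()         // slim encoding, legacy fields omitted
+//	if err != nil {
+//		return err
+//	}
+//	legacyBz, err := h.MarshalBinaryLegacy() // legacy encoding, emits field numbers 5, 7 and 9
+//	if err != nil {
+//		return err
+//	}
+//	_, _ = slimBz, legacyBz
+
+// Clone returns a deep copy of the legacy fields.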
+func (l *LegacyHeaderFields) Clone() *LegacyHeaderFields {
+	if l == nil {
+		return nil
+	}
+	clone := &LegacyHeaderFields{
+		LastCommitHash:  cloneBytes(l.LastCommitHash),
+		ConsensusHash:   cloneBytes(l.ConsensusHash),
+		LastResultsHash: cloneBytes(l.LastResultsHash),
+	}
+	return clone
+}
+
+// ApplyLegacyDefaults ensures the Header has a Legacy block initialised with
+// the expected defaults so that legacy serialization works without callers
+// needing to populate every field explicitly.
+func (h *Header) ApplyLegacyDefaults() {
+	if h.Legacy == nil {
+		h.Legacy = &LegacyHeaderFields{}
+	}
+	h.Legacy.EnsureDefaults()
+}
+
+// Clone creates a deep copy of the header, ensuring all mutable slices are
+// duplicated to avoid unintended sharing between variants.
+func (h Header) Clone() Header {
+	clone := h
+	clone.LastHeaderHash = cloneBytes(h.LastHeaderHash)
+	clone.DataHash = cloneBytes(h.DataHash)
+	clone.AppHash = cloneBytes(h.AppHash)
+	clone.ValidatorHash = cloneBytes(h.ValidatorHash)
+	clone.ProposerAddress = cloneBytes(h.ProposerAddress)
+	clone.Legacy = h.Legacy.Clone()
+
+	return clone
+}
+
+func cloneBytes(b []byte) []byte {
+	if len(b) == 0 {
+		return nil
+	}
+	return append([]byte(nil), b...)
+}
diff --git a/types/serialization.go b/types/serialization.go
index 8dfdef4fa5..cfc4445c09 100644
--- a/types/serialization.go
+++ b/types/serialization.go
@@ -2,9 +2,11 @@ package types
 
 import (
 	"errors"
+	"fmt"
 	"time"
 
 	"github.com/libp2p/go-libp2p/core/crypto"
+	"google.golang.org/protobuf/encoding/protowire"
 	"google.golang.org/protobuf/proto"
 	"google.golang.org/protobuf/types/known/timestamppb"
 
@@ -31,6 +33,12 @@ func (h *Header) MarshalBinary() ([]byte, error) {
 	return proto.Marshal(h.ToProto())
 }
 
+// MarshalBinaryLegacy returns the legacy header encoding that includes the
+// deprecated fields.
+func (h *Header) MarshalBinaryLegacy() ([]byte, error) {
+	return marshalLegacyHeader(h)
+}
+
 // UnmarshalBinary decodes binary form of Header into object.
 func (h *Header) UnmarshalBinary(data []byte) error {
 	var pHeader pb.Header
@@ -140,7 +148,7 @@ func (sh *SignedHeader) UnmarshalBinary(data []byte) error {
 
 // ToProto converts Header into protobuf representation and returns it.
 func (h *Header) ToProto() *pb.Header {
-	return &pb.Header{
+	pHeader := &pb.Header{
 		Version: &pb.Version{
 			Block: h.Version.Block,
 			App:   h.Version.App,
@@ -154,6 +162,7 @@
 		ChainId:       h.BaseHeader.ChainID,
 		ValidatorHash: h.ValidatorHash,
 	}
+	return pHeader
 }
 
 // FromProto fills Header with data from its protobuf representation.
@@ -198,6 +207,13 @@ func (h *Header) FromProto(other *pb.Header) error {
 	} else {
 		h.ValidatorHash = nil
 	}
+
+	legacy, err := decodeLegacyHeaderFields(other)
+	if err != nil {
+		return err
+	}
+	h.Legacy = legacy
+
 	return nil
 }
 
@@ -401,3 +417,175 @@ func (sd *SignedData) UnmarshalBinary(data []byte) error {
 	}
 	return sd.FromProto(&pData)
 }
+
+// Legacy protobuf field numbers for backwards compatibility
+const (
+	legacyLastCommitHashField  = 5
+	legacyConsensusHashField   = 7
+	legacyLastResultsHashField = 9
+)
+
+// Maximum size of unknown fields to prevent DoS attacks via malicious headers
+// with excessive unknown field data. 1MB should be more than sufficient for
+// legitimate legacy headers (typical header is ~500 bytes).
+const maxUnknownFieldSize = 1024 * 1024 // 1MB
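+
+// Decoding is transparent to callers: FromProto (used by UnmarshalBinary) hands
+// any unknown protobuf fields to decodeLegacyHeaderFields below. Illustrative
+// sketch; the variable names are hypothetical:
+//
+//	var h Header
+//	if err := h.UnmarshalBinary(payload); err != nil {
+//		return err
+//	}
+//	if h.Legacy != nil {
+//		// payload carried at least one legacy hash (field 5, 7 or 9)
+//	}
+
+// Maximum size for individual legacy hash fields. Standard hashes are 32 bytes,
+// but we allow up to 1KB for flexibility with different hash algorithms.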
+const maxLegacyHashSize = 1024 // 1KB
+
+func decodeLegacyHeaderFields(pHeader *pb.Header) (*LegacyHeaderFields, error) {
+	unknown := pHeader.ProtoReflect().GetUnknown()
+	if len(unknown) == 0 {
+		return nil, nil
+	}
+
+	// Protect against DoS attacks via headers with massive unknown field data
+	if len(unknown) > maxUnknownFieldSize {
+		return nil, fmt.Errorf("unknown fields too large: %d bytes (max %d)", len(unknown), maxUnknownFieldSize)
+	}
+
+	var legacy LegacyHeaderFields
+
+	for len(unknown) > 0 {
+		fieldNum, typ, n := protowire.ConsumeTag(unknown)
+		if n < 0 {
+			return nil, protowire.ParseError(n)
+		}
+		unknown = unknown[n:]
+
+		switch fieldNum {
+		case legacyLastCommitHashField, legacyConsensusHashField, legacyLastResultsHashField:
+			if typ != protowire.BytesType {
+				size := protowire.ConsumeFieldValue(fieldNum, typ, unknown)
+				if size < 0 {
+					return nil, protowire.ParseError(size)
+				}
+				unknown = unknown[size:]
+				continue
+			}
+
+			value, size := protowire.ConsumeBytes(unknown)
+			if size < 0 {
+				return nil, protowire.ParseError(size)
+			}
+			unknown = unknown[size:]
+
+			// Validate field size to prevent excessive memory allocation
+			if len(value) > maxLegacyHashSize {
+				return nil, fmt.Errorf("legacy hash field %d too large: %d bytes (max %d)",
+					fieldNum, len(value), maxLegacyHashSize)
+			}
+
+			copied := append([]byte(nil), value...)
+
+			switch fieldNum {
+			case legacyLastCommitHashField:
+				legacy.LastCommitHash = copied
+			case legacyConsensusHashField:
+				legacy.ConsensusHash = copied
+			case legacyLastResultsHashField:
+				legacy.LastResultsHash = copied
+			}
+		default:
+			size := protowire.ConsumeFieldValue(fieldNum, typ, unknown)
+			if size < 0 {
+				return nil, protowire.ParseError(size)
+			}
+			unknown = unknown[size:]
+		}
+	}
+
+	if legacy.IsZero() {
+		return nil, nil
+	}
+
+	return &legacy, nil
+}
+
+func appendBytesField(buf []byte, number protowire.Number, value []byte) []byte {
+	buf = protowire.AppendTag(buf, number, protowire.BytesType)
+	buf = protowire.AppendVarint(buf, uint64(len(value)))
+	buf = append(buf, value...)
+	return buf
+}
+
+func marshalLegacyHeader(h *Header) ([]byte, error) {
+	if h == nil {
+		return nil, errors.New("header is nil")
+	}
+
+	clone := h.Clone()
+	clone.ApplyLegacyDefaults()
+
+	var payload []byte
+
+	// version
+	versionBytes, err := proto.Marshal(&pb.Version{
+		Block: clone.Version.Block,
+		App:   clone.Version.App,
+	})
+	if err != nil {
+		return nil, err
+	}
+	payload = protowire.AppendTag(payload, 1, protowire.BytesType)
+	payload = protowire.AppendVarint(payload, uint64(len(versionBytes)))
+	payload = append(payload, versionBytes...)
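+
+	// The remaining fields are appended in ascending field-number order,
+	// mirroring the historical pb.Header layout: 1 version, 2 height, 3 time,
+	// 4 last header hash, 5 last commit hash (legacy), 6 data hash,
+	// 7 consensus hash (legacy), 8 app hash, 9 last results hash (legacy),
+	// 10 proposer address, 11 validator hash, 12 chain ID. Field names never
+	// appear on the wire; only the numbers and wire types do.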
+
+	// height
+	payload = protowire.AppendTag(payload, 2, protowire.VarintType)
+	payload = protowire.AppendVarint(payload, clone.BaseHeader.Height)
+
+	// time
+	payload = protowire.AppendTag(payload, 3, protowire.VarintType)
+	payload = protowire.AppendVarint(payload, clone.BaseHeader.Time)
+
+	// last header hash
+	if len(clone.LastHeaderHash) > 0 {
+		payload = appendBytesField(payload, 4, clone.LastHeaderHash)
+	}
+
+	// last commit hash (legacy)
+	if len(clone.Legacy.LastCommitHash) > 0 {
+		payload = appendBytesField(payload, legacyLastCommitHashField, clone.Legacy.LastCommitHash)
+	}
+
+	// data hash
+	if len(clone.DataHash) > 0 {
+		payload = appendBytesField(payload, 6, clone.DataHash)
+	}
+
+	// consensus hash (legacy)
+	if len(clone.Legacy.ConsensusHash) > 0 {
+		payload = appendBytesField(payload, legacyConsensusHashField, clone.Legacy.ConsensusHash)
+	}
+
+	// app hash
+	if len(clone.AppHash) > 0 {
+		payload = appendBytesField(payload, 8, clone.AppHash)
+	}
+
+	// last results hash (legacy)
+	if len(clone.Legacy.LastResultsHash) > 0 {
+		payload = appendBytesField(payload, legacyLastResultsHashField, clone.Legacy.LastResultsHash)
+	}
+
+	// proposer address
+	if len(clone.ProposerAddress) > 0 {
+		payload = appendBytesField(payload, 10, clone.ProposerAddress)
+	}
+
+	// validator hash
+	if len(clone.ValidatorHash) > 0 {
+		payload = appendBytesField(payload, 11, clone.ValidatorHash)
+	}
+
+	// chain ID
+	if len(clone.BaseHeader.ChainID) > 0 {
+		payload = protowire.AppendTag(payload, 12, protowire.BytesType)
+		payload = protowire.AppendVarint(payload, uint64(len(clone.BaseHeader.ChainID)))
+		payload = append(payload, clone.BaseHeader.ChainID...)
+	}
+
+	return payload, nil
+}
diff --git a/types/signed_header.go b/types/signed_header.go
index 6c7d25d75a..ffacbc847a 100644
--- a/types/signed_header.go
+++ b/types/signed_header.go
@@ -121,31 +121,68 @@ func (sh *SignedHeader) ValidateBasic() error {
 		return ErrProposerAddressMismatch
 	}
 
-	var (
-		bz  []byte
-		err error
-	)
+	// Track tried payloads using a slice since we have at most 3 attempts.
+	// This avoids allocating strings for map keys.
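+	// Payloads are attempted in order: the custom aggregator provider (when
+	// configured), then the slim encoding, then the legacy encoding; the first
+	// payload the signature verifies against wins.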
+	tried := make([][]byte, 0, 3)
+	tryPayload := func(payload []byte) (bool, error) {
+		if len(payload) == 0 {
+			return false, nil
+		}
+
+		// Check if we've already tried this payload
+		for _, p := range tried {
+			if bytes.Equal(p, payload) {
+				return false, nil
+			}
+		}
+		tried = append(tried, payload)
 
-	if sh.aggregatorSignatureProvider == nil {
-		bz, err = DefaultAggregatorNodeSignatureBytesProvider(&sh.Header)
+		verified, err := sh.Signer.PubKey.Verify(payload, sh.Signature)
 		if err != nil {
-			return fmt.Errorf("default signature payload provider failed: %w", err)
+			return false, err
 		}
-	} else {
-		bz, err = sh.aggregatorSignatureProvider(&sh.Header)
+		return verified, nil
+	}
+
+	if sh.aggregatorSignatureProvider != nil {
+		bz, err := sh.aggregatorSignatureProvider(&sh.Header)
 		if err != nil {
 			return fmt.Errorf("custom signature payload provider failed: %w", err)
 		}
+		ok, err := tryPayload(bz)
+		if err != nil {
+			return err
+		}
+		if ok {
+			return nil
+		}
+	}
+
+	slim, err := sh.Header.MarshalBinary()
+	if err != nil {
+		return err
+	}
+	ok, err := tryPayload(slim)
+	if err != nil {
+		return err
+	}
+	if ok {
+		return nil
+	}
 
-	verified, err := sh.Signer.PubKey.Verify(bz, sh.Signature)
+	legacy, err := sh.MarshalBinaryLegacy()
 	if err != nil {
 		return err
 	}
-	if !verified {
-		return ErrSignatureVerificationFailed
+	ok, err = tryPayload(legacy)
+	if err != nil {
+		return err
 	}
-	return nil
+	if ok {
+		return nil
+	}
+
+	return ErrSignatureVerificationFailed
 }
 
 // ValidateBasicWithData performs basic validator of a signed header, granted data for syncing node.
@@ -163,29 +200,66 @@ func (sh *SignedHeader) ValidateBasicWithData(data *Data) error {
 		return ErrProposerAddressMismatch
 	}
 
-	var (
-		bz  []byte
-		err error
-	)
+	// Track tried payloads using a slice since we have at most 3 attempts.
+	// This avoids allocating strings for map keys.
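+	// Same strategy as ValidateBasic, except the custom sync-node provider also
+	// receives the block data; duplicate payloads (e.g. a provider that returns
+	// the slim encoding) are skipped via bytes.Equal before verification.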
+	tried := make([][]byte, 0, 3)
+	tryPayload := func(payload []byte) (bool, error) {
+		if len(payload) == 0 {
+			return false, nil
+		}
+
+		// Check if we've already tried this payload
+		for _, p := range tried {
+			if bytes.Equal(p, payload) {
+				return false, nil
+			}
+		}
+		tried = append(tried, payload)
 
-	if sh.syncNodeSignatureBytesProvider == nil {
-		bz, err = DefaultSyncNodeSignatureBytesProvider(context.Background(), &sh.Header, nil)
+		verified, err := sh.Signer.PubKey.Verify(payload, sh.Signature)
 		if err != nil {
-			return fmt.Errorf("default signature payload provider failed: %w", err)
+			return false, err
 		}
-	} else {
-		bz, err = sh.syncNodeSignatureBytesProvider(context.Background(), &sh.Header, data)
+		return verified, nil
+	}
+
+	if sh.syncNodeSignatureBytesProvider != nil {
+		bz, err := sh.syncNodeSignatureBytesProvider(context.Background(), &sh.Header, data)
 		if err != nil {
 			return fmt.Errorf("custom signature payload provider failed: %w", err)
 		}
+		ok, err := tryPayload(bz)
+		if err != nil {
+			return err
+		}
+		if ok {
+			return nil
+		}
+	}
+
+	slim, err := sh.Header.MarshalBinary()
+	if err != nil {
+		return err
+	}
+	ok, err := tryPayload(slim)
+	if err != nil {
+		return err
+	}
+	if ok {
+		return nil
+	}
 
-	verified, err := sh.Signer.PubKey.Verify(bz, sh.Signature)
+	legacy, err := sh.MarshalBinaryLegacy()
 	if err != nil {
 		return err
 	}
-	if !verified {
-		return ErrSignatureVerificationFailed
+	ok, err = tryPayload(legacy)
+	if err != nil {
+		return err
 	}
-	return nil
+	if ok {
+		return nil
+	}
+
+	return ErrSignatureVerificationFailed
 }
diff --git a/types/signed_header_test.go b/types/signed_header_test.go
index cb5995739e..f3f7a50f79 100644
--- a/types/signed_header_test.go
+++ b/types/signed_header_test.go
@@ -1,6 +1,7 @@
 package types
 
 import (
+	"crypto/rand"
 	"fmt"
 	"testing"
 
@@ -246,3 +247,92 @@ func testValidateBasic(t *testing.T, untrustedAdj *SignedHeader, privKey crypto.
 		})
 	}
 }
+
+func TestSignedHeaderValidateBasic_LegacyFallback(t *testing.T) {
+	privKey, pubKey, err := crypto.GenerateSecp256k1Key(rand.Reader)
+	require.NoError(t, err)
+
+	address := KeyAddress(pubKey)
+
+	header := Header{
+		BaseHeader: BaseHeader{
+			ChainID: "chain-id",
+			Height:  2,
+			Time:    123456789,
+		},
+		Version:         Version{Block: 1, App: 1},
+		LastHeaderHash:  make(Hash, 32),
+		DataHash:        make(Hash, 32),
+		AppHash:         make(Hash, 32),
+		ValidatorHash:   make(Hash, 32),
+		ProposerAddress: append([]byte(nil), address...),
+		Legacy: &LegacyHeaderFields{
+			ConsensusHash: make(Hash, 32),
+		},
+	}
+
+	legacyBytes, err := header.MarshalBinaryLegacy()
+	require.NoError(t, err)
+
+	signature, err := privKey.Sign(legacyBytes)
+	require.NoError(t, err)
+
+	var decoded Header
+	require.NoError(t, decoded.UnmarshalBinary(legacyBytes))
+
+	signed := SignedHeader{
+		Header: decoded,
+		Signature: Signature(
+			append([]byte(nil), signature...),
+		),
+		Signer: Signer{
+			PubKey:  pubKey,
+			Address: append([]byte(nil), address...),
+		},
+	}
+
+	require.NoError(t, signed.ValidateBasic())
+}
+
+func TestSignedHeaderValidateBasic_SlimFormat(t *testing.T) {
+	privKey, pubKey, err := crypto.GenerateSecp256k1Key(rand.Reader)
+	require.NoError(t, err)
+
+	address := KeyAddress(pubKey)
+
+	header := Header{
+		BaseHeader: BaseHeader{
+			ChainID: "chain-id",
+			Height:  5,
+			Time:    987654321,
+		},
+		Version:         Version{Block: 1, App: 1},
+		LastHeaderHash:  make(Hash, 32),
+		DataHash:        make(Hash, 32),
+		AppHash:         make(Hash, 32),
+		ValidatorHash:   make(Hash, 32),
+		ProposerAddress: append([]byte(nil), address...),
+	}
+
+	payload, err := header.MarshalBinary()
+	require.NoError(t, err)
+
+	signature, err := privKey.Sign(payload)
+	require.NoError(t, err)
+
+	var decoded Header
+	require.NoError(t, decoded.UnmarshalBinary(payload))
+
+	signed := SignedHeader{
+		Header: decoded,
+		Signature: Signature(
+			append([]byte(nil), signature...),
+		),
+		Signer: Signer{
+			PubKey:  pubKey,
+			Address: append([]byte(nil), address...),
+		},
+	}
+
+	require.NoError(t, signed.ValidateBasic())
+}
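
Assuming the package layout matches the paths in this diff, the two new tests can be run from the repository root with:

	go test ./types -run TestSignedHeaderValidateBasic -v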