diff --git a/gen/supernode/storage_challenge.pb.go b/gen/supernode/storage_challenge.pb.go index e62e8be7..c8591176 100644 --- a/gen/supernode/storage_challenge.pb.go +++ b/gen/supernode/storage_challenge.pb.go @@ -1,7 +1,7 @@ // Code generated by protoc-gen-go. DO NOT EDIT. // versions: -// protoc-gen-go v1.36.9 -// protoc v3.21.12 +// protoc-gen-go v1.34.2 +// protoc v4.25.1 // source: supernode/storage_challenge.proto package supernode @@ -11,7 +11,6 @@ import ( protoimpl "google.golang.org/protobuf/runtime/protoimpl" reflect "reflect" sync "sync" - unsafe "unsafe" ) const ( @@ -21,26 +20,371 @@ const ( _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) ) +// ByteRange represents a half-open byte range [start, end) into an artifact. +type ByteRange struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Start uint64 `protobuf:"varint,1,opt,name=start,proto3" json:"start,omitempty"` + End uint64 `protobuf:"varint,2,opt,name=end,proto3" json:"end,omitempty"` // exclusive +} + +func (x *ByteRange) Reset() { + *x = ByteRange{} + if protoimpl.UnsafeEnabled { + mi := &file_supernode_storage_challenge_proto_msgTypes[0] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *ByteRange) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*ByteRange) ProtoMessage() {} + +func (x *ByteRange) ProtoReflect() protoreflect.Message { + mi := &file_supernode_storage_challenge_proto_msgTypes[0] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use ByteRange.ProtoReflect.Descriptor instead. 
+func (*ByteRange) Descriptor() ([]byte, []int) { + return file_supernode_storage_challenge_proto_rawDescGZIP(), []int{0} +} + +func (x *ByteRange) GetStart() uint64 { + if x != nil { + return x.Start + } + return 0 +} + +func (x *ByteRange) GetEnd() uint64 { + if x != nil { + return x.End + } + return 0 +} + +type GetCompoundProofRequest struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + ChallengeId string `protobuf:"bytes,1,opt,name=challenge_id,json=challengeId,proto3" json:"challenge_id,omitempty"` + EpochId uint64 `protobuf:"varint,2,opt,name=epoch_id,json=epochId,proto3" json:"epoch_id,omitempty"` + Seed []byte `protobuf:"bytes,3,opt,name=seed,proto3" json:"seed,omitempty"` + TicketId string `protobuf:"bytes,4,opt,name=ticket_id,json=ticketId,proto3" json:"ticket_id,omitempty"` + TargetSupernodeAccount string `protobuf:"bytes,5,opt,name=target_supernode_account,json=targetSupernodeAccount,proto3" json:"target_supernode_account,omitempty"` + ChallengerAccount string `protobuf:"bytes,6,opt,name=challenger_account,json=challengerAccount,proto3" json:"challenger_account,omitempty"` + ObserverAccounts []string `protobuf:"bytes,7,rep,name=observer_accounts,json=observerAccounts,proto3" json:"observer_accounts,omitempty"` + ArtifactClass uint32 `protobuf:"varint,8,opt,name=artifact_class,json=artifactClass,proto3" json:"artifact_class,omitempty"` // mirrors audittypes.StorageProofArtifactClass + ArtifactOrdinal uint32 `protobuf:"varint,9,opt,name=artifact_ordinal,json=artifactOrdinal,proto3" json:"artifact_ordinal,omitempty"` + ArtifactCount uint32 `protobuf:"varint,10,opt,name=artifact_count,json=artifactCount,proto3" json:"artifact_count,omitempty"` + BucketType uint32 `protobuf:"varint,11,opt,name=bucket_type,json=bucketType,proto3" json:"bucket_type,omitempty"` // mirrors audittypes.StorageProofBucketType + ArtifactKey string `protobuf:"bytes,12,opt,name=artifact_key,json=artifactKey,proto3" 
json:"artifact_key,omitempty"` + ArtifactSize uint64 `protobuf:"varint,13,opt,name=artifact_size,json=artifactSize,proto3" json:"artifact_size,omitempty"` + Ranges []*ByteRange `protobuf:"bytes,14,rep,name=ranges,proto3" json:"ranges,omitempty"` // exactly LEP6CompoundRangesPerArtifact (=4); each size LEP6CompoundRangeLenBytes (=256) +} + +func (x *GetCompoundProofRequest) Reset() { + *x = GetCompoundProofRequest{} + if protoimpl.UnsafeEnabled { + mi := &file_supernode_storage_challenge_proto_msgTypes[1] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *GetCompoundProofRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*GetCompoundProofRequest) ProtoMessage() {} + +func (x *GetCompoundProofRequest) ProtoReflect() protoreflect.Message { + mi := &file_supernode_storage_challenge_proto_msgTypes[1] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use GetCompoundProofRequest.ProtoReflect.Descriptor instead. 
+func (*GetCompoundProofRequest) Descriptor() ([]byte, []int) { + return file_supernode_storage_challenge_proto_rawDescGZIP(), []int{1} +} + +func (x *GetCompoundProofRequest) GetChallengeId() string { + if x != nil { + return x.ChallengeId + } + return "" +} + +func (x *GetCompoundProofRequest) GetEpochId() uint64 { + if x != nil { + return x.EpochId + } + return 0 +} + +func (x *GetCompoundProofRequest) GetSeed() []byte { + if x != nil { + return x.Seed + } + return nil +} + +func (x *GetCompoundProofRequest) GetTicketId() string { + if x != nil { + return x.TicketId + } + return "" +} + +func (x *GetCompoundProofRequest) GetTargetSupernodeAccount() string { + if x != nil { + return x.TargetSupernodeAccount + } + return "" +} + +func (x *GetCompoundProofRequest) GetChallengerAccount() string { + if x != nil { + return x.ChallengerAccount + } + return "" +} + +func (x *GetCompoundProofRequest) GetObserverAccounts() []string { + if x != nil { + return x.ObserverAccounts + } + return nil +} + +func (x *GetCompoundProofRequest) GetArtifactClass() uint32 { + if x != nil { + return x.ArtifactClass + } + return 0 +} + +func (x *GetCompoundProofRequest) GetArtifactOrdinal() uint32 { + if x != nil { + return x.ArtifactOrdinal + } + return 0 +} + +func (x *GetCompoundProofRequest) GetArtifactCount() uint32 { + if x != nil { + return x.ArtifactCount + } + return 0 +} + +func (x *GetCompoundProofRequest) GetBucketType() uint32 { + if x != nil { + return x.BucketType + } + return 0 +} + +func (x *GetCompoundProofRequest) GetArtifactKey() string { + if x != nil { + return x.ArtifactKey + } + return "" +} + +func (x *GetCompoundProofRequest) GetArtifactSize() uint64 { + if x != nil { + return x.ArtifactSize + } + return 0 +} + +func (x *GetCompoundProofRequest) GetRanges() []*ByteRange { + if x != nil { + return x.Ranges + } + return nil +} + +type GetCompoundProofResponse struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields 
protoimpl.UnknownFields + + ChallengeId string `protobuf:"bytes,1,opt,name=challenge_id,json=challengeId,proto3" json:"challenge_id,omitempty"` + EpochId uint64 `protobuf:"varint,2,opt,name=epoch_id,json=epochId,proto3" json:"epoch_id,omitempty"` + TicketId string `protobuf:"bytes,3,opt,name=ticket_id,json=ticketId,proto3" json:"ticket_id,omitempty"` + ArtifactClass uint32 `protobuf:"varint,4,opt,name=artifact_class,json=artifactClass,proto3" json:"artifact_class,omitempty"` + ArtifactOrdinal uint32 `protobuf:"varint,5,opt,name=artifact_ordinal,json=artifactOrdinal,proto3" json:"artifact_ordinal,omitempty"` + BucketType uint32 `protobuf:"varint,6,opt,name=bucket_type,json=bucketType,proto3" json:"bucket_type,omitempty"` + ArtifactKey string `protobuf:"bytes,7,opt,name=artifact_key,json=artifactKey,proto3" json:"artifact_key,omitempty"` + RangeBytes [][]byte `protobuf:"bytes,8,rep,name=range_bytes,json=rangeBytes,proto3" json:"range_bytes,omitempty"` // i-th matches i-th request range + ProofHashHex string `protobuf:"bytes,9,opt,name=proof_hash_hex,json=proofHashHex,proto3" json:"proof_hash_hex,omitempty"` // BLAKE3(concat(range_bytes...)) lowercase hex + RecipientSignature string `protobuf:"bytes,10,opt,name=recipient_signature,json=recipientSignature,proto3" json:"recipient_signature,omitempty"` // recipient's keyring signature + Ok bool `protobuf:"varint,11,opt,name=ok,proto3" json:"ok,omitempty"` + Error string `protobuf:"bytes,12,opt,name=error,proto3" json:"error,omitempty"` +} + +func (x *GetCompoundProofResponse) Reset() { + *x = GetCompoundProofResponse{} + if protoimpl.UnsafeEnabled { + mi := &file_supernode_storage_challenge_proto_msgTypes[2] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *GetCompoundProofResponse) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*GetCompoundProofResponse) ProtoMessage() {} + +func (x *GetCompoundProofResponse) ProtoReflect() protoreflect.Message { 
+ mi := &file_supernode_storage_challenge_proto_msgTypes[2] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use GetCompoundProofResponse.ProtoReflect.Descriptor instead. +func (*GetCompoundProofResponse) Descriptor() ([]byte, []int) { + return file_supernode_storage_challenge_proto_rawDescGZIP(), []int{2} +} + +func (x *GetCompoundProofResponse) GetChallengeId() string { + if x != nil { + return x.ChallengeId + } + return "" +} + +func (x *GetCompoundProofResponse) GetEpochId() uint64 { + if x != nil { + return x.EpochId + } + return 0 +} + +func (x *GetCompoundProofResponse) GetTicketId() string { + if x != nil { + return x.TicketId + } + return "" +} + +func (x *GetCompoundProofResponse) GetArtifactClass() uint32 { + if x != nil { + return x.ArtifactClass + } + return 0 +} + +func (x *GetCompoundProofResponse) GetArtifactOrdinal() uint32 { + if x != nil { + return x.ArtifactOrdinal + } + return 0 +} + +func (x *GetCompoundProofResponse) GetBucketType() uint32 { + if x != nil { + return x.BucketType + } + return 0 +} + +func (x *GetCompoundProofResponse) GetArtifactKey() string { + if x != nil { + return x.ArtifactKey + } + return "" +} + +func (x *GetCompoundProofResponse) GetRangeBytes() [][]byte { + if x != nil { + return x.RangeBytes + } + return nil +} + +func (x *GetCompoundProofResponse) GetProofHashHex() string { + if x != nil { + return x.ProofHashHex + } + return "" +} + +func (x *GetCompoundProofResponse) GetRecipientSignature() string { + if x != nil { + return x.RecipientSignature + } + return "" +} + +func (x *GetCompoundProofResponse) GetOk() bool { + if x != nil { + return x.Ok + } + return false +} + +func (x *GetCompoundProofResponse) GetError() string { + if x != nil { + return x.Error + } + return "" +} + type GetSliceProofRequest struct { - state 
protoimpl.MessageState `protogen:"open.v1"` - ChallengeId string `protobuf:"bytes,1,opt,name=challenge_id,json=challengeId,proto3" json:"challenge_id,omitempty"` - EpochId uint64 `protobuf:"varint,2,opt,name=epoch_id,json=epochId,proto3" json:"epoch_id,omitempty"` - Seed []byte `protobuf:"bytes,3,opt,name=seed,proto3" json:"seed,omitempty"` - FileKey string `protobuf:"bytes,4,opt,name=file_key,json=fileKey,proto3" json:"file_key,omitempty"` - RequestedStart uint64 `protobuf:"varint,5,opt,name=requested_start,json=requestedStart,proto3" json:"requested_start,omitempty"` - RequestedEnd uint64 `protobuf:"varint,6,opt,name=requested_end,json=requestedEnd,proto3" json:"requested_end,omitempty"` - ChallengerId string `protobuf:"bytes,7,opt,name=challenger_id,json=challengerId,proto3" json:"challenger_id,omitempty"` - RecipientId string `protobuf:"bytes,8,opt,name=recipient_id,json=recipientId,proto3" json:"recipient_id,omitempty"` - ObserverIds []string `protobuf:"bytes,9,rep,name=observer_ids,json=observerIds,proto3" json:"observer_ids,omitempty"` - unknownFields protoimpl.UnknownFields - sizeCache protoimpl.SizeCache + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + ChallengeId string `protobuf:"bytes,1,opt,name=challenge_id,json=challengeId,proto3" json:"challenge_id,omitempty"` + EpochId uint64 `protobuf:"varint,2,opt,name=epoch_id,json=epochId,proto3" json:"epoch_id,omitempty"` + Seed []byte `protobuf:"bytes,3,opt,name=seed,proto3" json:"seed,omitempty"` + FileKey string `protobuf:"bytes,4,opt,name=file_key,json=fileKey,proto3" json:"file_key,omitempty"` + RequestedStart uint64 `protobuf:"varint,5,opt,name=requested_start,json=requestedStart,proto3" json:"requested_start,omitempty"` + RequestedEnd uint64 `protobuf:"varint,6,opt,name=requested_end,json=requestedEnd,proto3" json:"requested_end,omitempty"` + ChallengerId string `protobuf:"bytes,7,opt,name=challenger_id,json=challengerId,proto3" 
json:"challenger_id,omitempty"` + RecipientId string `protobuf:"bytes,8,opt,name=recipient_id,json=recipientId,proto3" json:"recipient_id,omitempty"` + ObserverIds []string `protobuf:"bytes,9,rep,name=observer_ids,json=observerIds,proto3" json:"observer_ids,omitempty"` } func (x *GetSliceProofRequest) Reset() { *x = GetSliceProofRequest{} - mi := &file_supernode_storage_challenge_proto_msgTypes[0] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) + if protoimpl.UnsafeEnabled { + mi := &file_supernode_storage_challenge_proto_msgTypes[3] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } } func (x *GetSliceProofRequest) String() string { @@ -50,8 +394,8 @@ func (x *GetSliceProofRequest) String() string { func (*GetSliceProofRequest) ProtoMessage() {} func (x *GetSliceProofRequest) ProtoReflect() protoreflect.Message { - mi := &file_supernode_storage_challenge_proto_msgTypes[0] - if x != nil { + mi := &file_supernode_storage_challenge_proto_msgTypes[3] + if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -63,7 +407,7 @@ func (x *GetSliceProofRequest) ProtoReflect() protoreflect.Message { // Deprecated: Use GetSliceProofRequest.ProtoReflect.Descriptor instead. 
func (*GetSliceProofRequest) Descriptor() ([]byte, []int) { - return file_supernode_storage_challenge_proto_rawDescGZIP(), []int{0} + return file_supernode_storage_challenge_proto_rawDescGZIP(), []int{3} } func (x *GetSliceProofRequest) GetChallengeId() string { @@ -130,26 +474,29 @@ func (x *GetSliceProofRequest) GetObserverIds() []string { } type GetSliceProofResponse struct { - state protoimpl.MessageState `protogen:"open.v1"` - ChallengeId string `protobuf:"bytes,1,opt,name=challenge_id,json=challengeId,proto3" json:"challenge_id,omitempty"` - EpochId uint64 `protobuf:"varint,2,opt,name=epoch_id,json=epochId,proto3" json:"epoch_id,omitempty"` - FileKey string `protobuf:"bytes,3,opt,name=file_key,json=fileKey,proto3" json:"file_key,omitempty"` - Start uint64 `protobuf:"varint,4,opt,name=start,proto3" json:"start,omitempty"` - End uint64 `protobuf:"varint,5,opt,name=end,proto3" json:"end,omitempty"` - RecipientId string `protobuf:"bytes,6,opt,name=recipient_id,json=recipientId,proto3" json:"recipient_id,omitempty"` - Slice []byte `protobuf:"bytes,7,opt,name=slice,proto3" json:"slice,omitempty"` - ProofHashHex string `protobuf:"bytes,8,opt,name=proof_hash_hex,json=proofHashHex,proto3" json:"proof_hash_hex,omitempty"` - Ok bool `protobuf:"varint,9,opt,name=ok,proto3" json:"ok,omitempty"` - Error string `protobuf:"bytes,10,opt,name=error,proto3" json:"error,omitempty"` - unknownFields protoimpl.UnknownFields + state protoimpl.MessageState sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + ChallengeId string `protobuf:"bytes,1,opt,name=challenge_id,json=challengeId,proto3" json:"challenge_id,omitempty"` + EpochId uint64 `protobuf:"varint,2,opt,name=epoch_id,json=epochId,proto3" json:"epoch_id,omitempty"` + FileKey string `protobuf:"bytes,3,opt,name=file_key,json=fileKey,proto3" json:"file_key,omitempty"` + Start uint64 `protobuf:"varint,4,opt,name=start,proto3" json:"start,omitempty"` + End uint64 `protobuf:"varint,5,opt,name=end,proto3" 
json:"end,omitempty"` + RecipientId string `protobuf:"bytes,6,opt,name=recipient_id,json=recipientId,proto3" json:"recipient_id,omitempty"` + Slice []byte `protobuf:"bytes,7,opt,name=slice,proto3" json:"slice,omitempty"` + ProofHashHex string `protobuf:"bytes,8,opt,name=proof_hash_hex,json=proofHashHex,proto3" json:"proof_hash_hex,omitempty"` + Ok bool `protobuf:"varint,9,opt,name=ok,proto3" json:"ok,omitempty"` + Error string `protobuf:"bytes,10,opt,name=error,proto3" json:"error,omitempty"` } func (x *GetSliceProofResponse) Reset() { *x = GetSliceProofResponse{} - mi := &file_supernode_storage_challenge_proto_msgTypes[1] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) + if protoimpl.UnsafeEnabled { + mi := &file_supernode_storage_challenge_proto_msgTypes[4] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } } func (x *GetSliceProofResponse) String() string { @@ -159,8 +506,8 @@ func (x *GetSliceProofResponse) String() string { func (*GetSliceProofResponse) ProtoMessage() {} func (x *GetSliceProofResponse) ProtoReflect() protoreflect.Message { - mi := &file_supernode_storage_challenge_proto_msgTypes[1] - if x != nil { + mi := &file_supernode_storage_challenge_proto_msgTypes[4] + if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -172,7 +519,7 @@ func (x *GetSliceProofResponse) ProtoReflect() protoreflect.Message { // Deprecated: Use GetSliceProofResponse.ProtoReflect.Descriptor instead. 
func (*GetSliceProofResponse) Descriptor() ([]byte, []int) { - return file_supernode_storage_challenge_proto_rawDescGZIP(), []int{1} + return file_supernode_storage_challenge_proto_rawDescGZIP(), []int{4} } func (x *GetSliceProofResponse) GetChallengeId() string { @@ -246,25 +593,28 @@ func (x *GetSliceProofResponse) GetError() string { } type VerifySliceProofRequest struct { - state protoimpl.MessageState `protogen:"open.v1"` - ChallengeId string `protobuf:"bytes,1,opt,name=challenge_id,json=challengeId,proto3" json:"challenge_id,omitempty"` - EpochId uint64 `protobuf:"varint,2,opt,name=epoch_id,json=epochId,proto3" json:"epoch_id,omitempty"` - FileKey string `protobuf:"bytes,3,opt,name=file_key,json=fileKey,proto3" json:"file_key,omitempty"` - Start uint64 `protobuf:"varint,4,opt,name=start,proto3" json:"start,omitempty"` - End uint64 `protobuf:"varint,5,opt,name=end,proto3" json:"end,omitempty"` - Slice []byte `protobuf:"bytes,6,opt,name=slice,proto3" json:"slice,omitempty"` - ProofHashHex string `protobuf:"bytes,7,opt,name=proof_hash_hex,json=proofHashHex,proto3" json:"proof_hash_hex,omitempty"` - ChallengerId string `protobuf:"bytes,8,opt,name=challenger_id,json=challengerId,proto3" json:"challenger_id,omitempty"` - RecipientId string `protobuf:"bytes,9,opt,name=recipient_id,json=recipientId,proto3" json:"recipient_id,omitempty"` - unknownFields protoimpl.UnknownFields + state protoimpl.MessageState sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + ChallengeId string `protobuf:"bytes,1,opt,name=challenge_id,json=challengeId,proto3" json:"challenge_id,omitempty"` + EpochId uint64 `protobuf:"varint,2,opt,name=epoch_id,json=epochId,proto3" json:"epoch_id,omitempty"` + FileKey string `protobuf:"bytes,3,opt,name=file_key,json=fileKey,proto3" json:"file_key,omitempty"` + Start uint64 `protobuf:"varint,4,opt,name=start,proto3" json:"start,omitempty"` + End uint64 `protobuf:"varint,5,opt,name=end,proto3" json:"end,omitempty"` + Slice []byte 
`protobuf:"bytes,6,opt,name=slice,proto3" json:"slice,omitempty"` + ProofHashHex string `protobuf:"bytes,7,opt,name=proof_hash_hex,json=proofHashHex,proto3" json:"proof_hash_hex,omitempty"` + ChallengerId string `protobuf:"bytes,8,opt,name=challenger_id,json=challengerId,proto3" json:"challenger_id,omitempty"` + RecipientId string `protobuf:"bytes,9,opt,name=recipient_id,json=recipientId,proto3" json:"recipient_id,omitempty"` } func (x *VerifySliceProofRequest) Reset() { *x = VerifySliceProofRequest{} - mi := &file_supernode_storage_challenge_proto_msgTypes[2] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) + if protoimpl.UnsafeEnabled { + mi := &file_supernode_storage_challenge_proto_msgTypes[5] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } } func (x *VerifySliceProofRequest) String() string { @@ -274,8 +624,8 @@ func (x *VerifySliceProofRequest) String() string { func (*VerifySliceProofRequest) ProtoMessage() {} func (x *VerifySliceProofRequest) ProtoReflect() protoreflect.Message { - mi := &file_supernode_storage_challenge_proto_msgTypes[2] - if x != nil { + mi := &file_supernode_storage_challenge_proto_msgTypes[5] + if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -287,7 +637,7 @@ func (x *VerifySliceProofRequest) ProtoReflect() protoreflect.Message { // Deprecated: Use VerifySliceProofRequest.ProtoReflect.Descriptor instead. 
func (*VerifySliceProofRequest) Descriptor() ([]byte, []int) { - return file_supernode_storage_challenge_proto_rawDescGZIP(), []int{2} + return file_supernode_storage_challenge_proto_rawDescGZIP(), []int{5} } func (x *VerifySliceProofRequest) GetChallengeId() string { @@ -354,21 +704,24 @@ func (x *VerifySliceProofRequest) GetRecipientId() string { } type VerifySliceProofResponse struct { - state protoimpl.MessageState `protogen:"open.v1"` - ChallengeId string `protobuf:"bytes,1,opt,name=challenge_id,json=challengeId,proto3" json:"challenge_id,omitempty"` - EpochId uint64 `protobuf:"varint,2,opt,name=epoch_id,json=epochId,proto3" json:"epoch_id,omitempty"` - ObserverId string `protobuf:"bytes,3,opt,name=observer_id,json=observerId,proto3" json:"observer_id,omitempty"` - Ok bool `protobuf:"varint,4,opt,name=ok,proto3" json:"ok,omitempty"` - Error string `protobuf:"bytes,5,opt,name=error,proto3" json:"error,omitempty"` - unknownFields protoimpl.UnknownFields + state protoimpl.MessageState sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + ChallengeId string `protobuf:"bytes,1,opt,name=challenge_id,json=challengeId,proto3" json:"challenge_id,omitempty"` + EpochId uint64 `protobuf:"varint,2,opt,name=epoch_id,json=epochId,proto3" json:"epoch_id,omitempty"` + ObserverId string `protobuf:"bytes,3,opt,name=observer_id,json=observerId,proto3" json:"observer_id,omitempty"` + Ok bool `protobuf:"varint,4,opt,name=ok,proto3" json:"ok,omitempty"` + Error string `protobuf:"bytes,5,opt,name=error,proto3" json:"error,omitempty"` } func (x *VerifySliceProofResponse) Reset() { *x = VerifySliceProofResponse{} - mi := &file_supernode_storage_challenge_proto_msgTypes[3] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) + if protoimpl.UnsafeEnabled { + mi := &file_supernode_storage_challenge_proto_msgTypes[6] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } } func (x *VerifySliceProofResponse) 
String() string { @@ -378,8 +731,8 @@ func (x *VerifySliceProofResponse) String() string { func (*VerifySliceProofResponse) ProtoMessage() {} func (x *VerifySliceProofResponse) ProtoReflect() protoreflect.Message { - mi := &file_supernode_storage_challenge_proto_msgTypes[3] - if x != nil { + mi := &file_supernode_storage_challenge_proto_msgTypes[6] + if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -391,7 +744,7 @@ func (x *VerifySliceProofResponse) ProtoReflect() protoreflect.Message { // Deprecated: Use VerifySliceProofResponse.ProtoReflect.Descriptor instead. func (*VerifySliceProofResponse) Descriptor() ([]byte, []int) { - return file_supernode_storage_challenge_proto_rawDescGZIP(), []int{3} + return file_supernode_storage_challenge_proto_rawDescGZIP(), []int{6} } func (x *VerifySliceProofResponse) GetChallengeId() string { @@ -431,81 +784,202 @@ func (x *VerifySliceProofResponse) GetError() string { var File_supernode_storage_challenge_proto protoreflect.FileDescriptor -const file_supernode_storage_challenge_proto_rawDesc = "" + - "\n" + - "!supernode/storage_challenge.proto\x12\tsupernode\"\xbc\x02\n" + - "\x14GetSliceProofRequest\x12!\n" + - "\fchallenge_id\x18\x01 \x01(\tR\vchallengeId\x12\x19\n" + - "\bepoch_id\x18\x02 \x01(\x04R\aepochId\x12\x12\n" + - "\x04seed\x18\x03 \x01(\fR\x04seed\x12\x19\n" + - "\bfile_key\x18\x04 \x01(\tR\afileKey\x12'\n" + - "\x0frequested_start\x18\x05 \x01(\x04R\x0erequestedStart\x12#\n" + - "\rrequested_end\x18\x06 \x01(\x04R\frequestedEnd\x12#\n" + - "\rchallenger_id\x18\a \x01(\tR\fchallengerId\x12!\n" + - "\frecipient_id\x18\b \x01(\tR\vrecipientId\x12!\n" + - "\fobserver_ids\x18\t \x03(\tR\vobserverIds\"\x9d\x02\n" + - "\x15GetSliceProofResponse\x12!\n" + - "\fchallenge_id\x18\x01 \x01(\tR\vchallengeId\x12\x19\n" + - "\bepoch_id\x18\x02 \x01(\x04R\aepochId\x12\x19\n" + - "\bfile_key\x18\x03 
\x01(\tR\afileKey\x12\x14\n" + - "\x05start\x18\x04 \x01(\x04R\x05start\x12\x10\n" + - "\x03end\x18\x05 \x01(\x04R\x03end\x12!\n" + - "\frecipient_id\x18\x06 \x01(\tR\vrecipientId\x12\x14\n" + - "\x05slice\x18\a \x01(\fR\x05slice\x12$\n" + - "\x0eproof_hash_hex\x18\b \x01(\tR\fproofHashHex\x12\x0e\n" + - "\x02ok\x18\t \x01(\bR\x02ok\x12\x14\n" + - "\x05error\x18\n" + - " \x01(\tR\x05error\"\x9e\x02\n" + - "\x17VerifySliceProofRequest\x12!\n" + - "\fchallenge_id\x18\x01 \x01(\tR\vchallengeId\x12\x19\n" + - "\bepoch_id\x18\x02 \x01(\x04R\aepochId\x12\x19\n" + - "\bfile_key\x18\x03 \x01(\tR\afileKey\x12\x14\n" + - "\x05start\x18\x04 \x01(\x04R\x05start\x12\x10\n" + - "\x03end\x18\x05 \x01(\x04R\x03end\x12\x14\n" + - "\x05slice\x18\x06 \x01(\fR\x05slice\x12$\n" + - "\x0eproof_hash_hex\x18\a \x01(\tR\fproofHashHex\x12#\n" + - "\rchallenger_id\x18\b \x01(\tR\fchallengerId\x12!\n" + - "\frecipient_id\x18\t \x01(\tR\vrecipientId\"\x9f\x01\n" + - "\x18VerifySliceProofResponse\x12!\n" + - "\fchallenge_id\x18\x01 \x01(\tR\vchallengeId\x12\x19\n" + - "\bepoch_id\x18\x02 \x01(\x04R\aepochId\x12\x1f\n" + - "\vobserver_id\x18\x03 \x01(\tR\n" + - "observerId\x12\x0e\n" + - "\x02ok\x18\x04 \x01(\bR\x02ok\x12\x14\n" + - "\x05error\x18\x05 \x01(\tR\x05error2\xce\x01\n" + - "\x17StorageChallengeService\x12T\n" + - "\rGetSliceProof\x12\x1f.supernode.GetSliceProofRequest\x1a .supernode.GetSliceProofResponse\"\x00\x12]\n" + - "\x10VerifySliceProof\x12\".supernode.VerifySliceProofRequest\x1a#.supernode.VerifySliceProofResponse\"\x00B6Z4github.com/LumeraProtocol/supernode/v2/gen/supernodeb\x06proto3" +var file_supernode_storage_challenge_proto_rawDesc = []byte{ + 0x0a, 0x21, 0x73, 0x75, 0x70, 0x65, 0x72, 0x6e, 0x6f, 0x64, 0x65, 0x2f, 0x73, 0x74, 0x6f, 0x72, + 0x61, 0x67, 0x65, 0x5f, 0x63, 0x68, 0x61, 0x6c, 0x6c, 0x65, 0x6e, 0x67, 0x65, 0x2e, 0x70, 0x72, + 0x6f, 0x74, 0x6f, 0x12, 0x09, 0x73, 0x75, 0x70, 0x65, 0x72, 0x6e, 0x6f, 0x64, 0x65, 0x22, 0x33, + 0x0a, 0x09, 0x42, 0x79, 0x74, 0x65, 
0x52, 0x61, 0x6e, 0x67, 0x65, 0x12, 0x14, 0x0a, 0x05, 0x73, + 0x74, 0x61, 0x72, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x04, 0x52, 0x05, 0x73, 0x74, 0x61, 0x72, + 0x74, 0x12, 0x10, 0x0a, 0x03, 0x65, 0x6e, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x04, 0x52, 0x03, + 0x65, 0x6e, 0x64, 0x22, 0xae, 0x04, 0x0a, 0x17, 0x47, 0x65, 0x74, 0x43, 0x6f, 0x6d, 0x70, 0x6f, + 0x75, 0x6e, 0x64, 0x50, 0x72, 0x6f, 0x6f, 0x66, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, + 0x21, 0x0a, 0x0c, 0x63, 0x68, 0x61, 0x6c, 0x6c, 0x65, 0x6e, 0x67, 0x65, 0x5f, 0x69, 0x64, 0x18, + 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x63, 0x68, 0x61, 0x6c, 0x6c, 0x65, 0x6e, 0x67, 0x65, + 0x49, 0x64, 0x12, 0x19, 0x0a, 0x08, 0x65, 0x70, 0x6f, 0x63, 0x68, 0x5f, 0x69, 0x64, 0x18, 0x02, + 0x20, 0x01, 0x28, 0x04, 0x52, 0x07, 0x65, 0x70, 0x6f, 0x63, 0x68, 0x49, 0x64, 0x12, 0x12, 0x0a, + 0x04, 0x73, 0x65, 0x65, 0x64, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x04, 0x73, 0x65, 0x65, + 0x64, 0x12, 0x1b, 0x0a, 0x09, 0x74, 0x69, 0x63, 0x6b, 0x65, 0x74, 0x5f, 0x69, 0x64, 0x18, 0x04, + 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x74, 0x69, 0x63, 0x6b, 0x65, 0x74, 0x49, 0x64, 0x12, 0x38, + 0x0a, 0x18, 0x74, 0x61, 0x72, 0x67, 0x65, 0x74, 0x5f, 0x73, 0x75, 0x70, 0x65, 0x72, 0x6e, 0x6f, + 0x64, 0x65, 0x5f, 0x61, 0x63, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x18, 0x05, 0x20, 0x01, 0x28, 0x09, + 0x52, 0x16, 0x74, 0x61, 0x72, 0x67, 0x65, 0x74, 0x53, 0x75, 0x70, 0x65, 0x72, 0x6e, 0x6f, 0x64, + 0x65, 0x41, 0x63, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x12, 0x2d, 0x0a, 0x12, 0x63, 0x68, 0x61, 0x6c, + 0x6c, 0x65, 0x6e, 0x67, 0x65, 0x72, 0x5f, 0x61, 0x63, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x18, 0x06, + 0x20, 0x01, 0x28, 0x09, 0x52, 0x11, 0x63, 0x68, 0x61, 0x6c, 0x6c, 0x65, 0x6e, 0x67, 0x65, 0x72, + 0x41, 0x63, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x12, 0x2b, 0x0a, 0x11, 0x6f, 0x62, 0x73, 0x65, 0x72, + 0x76, 0x65, 0x72, 0x5f, 0x61, 0x63, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x73, 0x18, 0x07, 0x20, 0x03, + 0x28, 0x09, 0x52, 0x10, 0x6f, 0x62, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 
0x41, 0x63, 0x63, 0x6f, + 0x75, 0x6e, 0x74, 0x73, 0x12, 0x25, 0x0a, 0x0e, 0x61, 0x72, 0x74, 0x69, 0x66, 0x61, 0x63, 0x74, + 0x5f, 0x63, 0x6c, 0x61, 0x73, 0x73, 0x18, 0x08, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x0d, 0x61, 0x72, + 0x74, 0x69, 0x66, 0x61, 0x63, 0x74, 0x43, 0x6c, 0x61, 0x73, 0x73, 0x12, 0x29, 0x0a, 0x10, 0x61, + 0x72, 0x74, 0x69, 0x66, 0x61, 0x63, 0x74, 0x5f, 0x6f, 0x72, 0x64, 0x69, 0x6e, 0x61, 0x6c, 0x18, + 0x09, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x0f, 0x61, 0x72, 0x74, 0x69, 0x66, 0x61, 0x63, 0x74, 0x4f, + 0x72, 0x64, 0x69, 0x6e, 0x61, 0x6c, 0x12, 0x25, 0x0a, 0x0e, 0x61, 0x72, 0x74, 0x69, 0x66, 0x61, + 0x63, 0x74, 0x5f, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x18, 0x0a, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x0d, + 0x61, 0x72, 0x74, 0x69, 0x66, 0x61, 0x63, 0x74, 0x43, 0x6f, 0x75, 0x6e, 0x74, 0x12, 0x1f, 0x0a, + 0x0b, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x18, 0x0b, 0x20, 0x01, + 0x28, 0x0d, 0x52, 0x0a, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x54, 0x79, 0x70, 0x65, 0x12, 0x21, + 0x0a, 0x0c, 0x61, 0x72, 0x74, 0x69, 0x66, 0x61, 0x63, 0x74, 0x5f, 0x6b, 0x65, 0x79, 0x18, 0x0c, + 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x61, 0x72, 0x74, 0x69, 0x66, 0x61, 0x63, 0x74, 0x4b, 0x65, + 0x79, 0x12, 0x23, 0x0a, 0x0d, 0x61, 0x72, 0x74, 0x69, 0x66, 0x61, 0x63, 0x74, 0x5f, 0x73, 0x69, + 0x7a, 0x65, 0x18, 0x0d, 0x20, 0x01, 0x28, 0x04, 0x52, 0x0c, 0x61, 0x72, 0x74, 0x69, 0x66, 0x61, + 0x63, 0x74, 0x53, 0x69, 0x7a, 0x65, 0x12, 0x2c, 0x0a, 0x06, 0x72, 0x61, 0x6e, 0x67, 0x65, 0x73, + 0x18, 0x0e, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x14, 0x2e, 0x73, 0x75, 0x70, 0x65, 0x72, 0x6e, 0x6f, + 0x64, 0x65, 0x2e, 0x42, 0x79, 0x74, 0x65, 0x52, 0x61, 0x6e, 0x67, 0x65, 0x52, 0x06, 0x72, 0x61, + 0x6e, 0x67, 0x65, 0x73, 0x22, 0xa9, 0x03, 0x0a, 0x18, 0x47, 0x65, 0x74, 0x43, 0x6f, 0x6d, 0x70, + 0x6f, 0x75, 0x6e, 0x64, 0x50, 0x72, 0x6f, 0x6f, 0x66, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, + 0x65, 0x12, 0x21, 0x0a, 0x0c, 0x63, 0x68, 0x61, 0x6c, 0x6c, 0x65, 0x6e, 0x67, 0x65, 0x5f, 0x69, + 0x64, 0x18, 
0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x63, 0x68, 0x61, 0x6c, 0x6c, 0x65, 0x6e, + 0x67, 0x65, 0x49, 0x64, 0x12, 0x19, 0x0a, 0x08, 0x65, 0x70, 0x6f, 0x63, 0x68, 0x5f, 0x69, 0x64, + 0x18, 0x02, 0x20, 0x01, 0x28, 0x04, 0x52, 0x07, 0x65, 0x70, 0x6f, 0x63, 0x68, 0x49, 0x64, 0x12, + 0x1b, 0x0a, 0x09, 0x74, 0x69, 0x63, 0x6b, 0x65, 0x74, 0x5f, 0x69, 0x64, 0x18, 0x03, 0x20, 0x01, + 0x28, 0x09, 0x52, 0x08, 0x74, 0x69, 0x63, 0x6b, 0x65, 0x74, 0x49, 0x64, 0x12, 0x25, 0x0a, 0x0e, + 0x61, 0x72, 0x74, 0x69, 0x66, 0x61, 0x63, 0x74, 0x5f, 0x63, 0x6c, 0x61, 0x73, 0x73, 0x18, 0x04, + 0x20, 0x01, 0x28, 0x0d, 0x52, 0x0d, 0x61, 0x72, 0x74, 0x69, 0x66, 0x61, 0x63, 0x74, 0x43, 0x6c, + 0x61, 0x73, 0x73, 0x12, 0x29, 0x0a, 0x10, 0x61, 0x72, 0x74, 0x69, 0x66, 0x61, 0x63, 0x74, 0x5f, + 0x6f, 0x72, 0x64, 0x69, 0x6e, 0x61, 0x6c, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x0f, 0x61, + 0x72, 0x74, 0x69, 0x66, 0x61, 0x63, 0x74, 0x4f, 0x72, 0x64, 0x69, 0x6e, 0x61, 0x6c, 0x12, 0x1f, + 0x0a, 0x0b, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x18, 0x06, 0x20, + 0x01, 0x28, 0x0d, 0x52, 0x0a, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x54, 0x79, 0x70, 0x65, 0x12, + 0x21, 0x0a, 0x0c, 0x61, 0x72, 0x74, 0x69, 0x66, 0x61, 0x63, 0x74, 0x5f, 0x6b, 0x65, 0x79, 0x18, + 0x07, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x61, 0x72, 0x74, 0x69, 0x66, 0x61, 0x63, 0x74, 0x4b, + 0x65, 0x79, 0x12, 0x1f, 0x0a, 0x0b, 0x72, 0x61, 0x6e, 0x67, 0x65, 0x5f, 0x62, 0x79, 0x74, 0x65, + 0x73, 0x18, 0x08, 0x20, 0x03, 0x28, 0x0c, 0x52, 0x0a, 0x72, 0x61, 0x6e, 0x67, 0x65, 0x42, 0x79, + 0x74, 0x65, 0x73, 0x12, 0x24, 0x0a, 0x0e, 0x70, 0x72, 0x6f, 0x6f, 0x66, 0x5f, 0x68, 0x61, 0x73, + 0x68, 0x5f, 0x68, 0x65, 0x78, 0x18, 0x09, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0c, 0x70, 0x72, 0x6f, + 0x6f, 0x66, 0x48, 0x61, 0x73, 0x68, 0x48, 0x65, 0x78, 0x12, 0x2f, 0x0a, 0x13, 0x72, 0x65, 0x63, + 0x69, 0x70, 0x69, 0x65, 0x6e, 0x74, 0x5f, 0x73, 0x69, 0x67, 0x6e, 0x61, 0x74, 0x75, 0x72, 0x65, + 0x18, 0x0a, 0x20, 0x01, 0x28, 0x09, 0x52, 0x12, 
0x72, 0x65, 0x63, 0x69, 0x70, 0x69, 0x65, 0x6e, + 0x74, 0x53, 0x69, 0x67, 0x6e, 0x61, 0x74, 0x75, 0x72, 0x65, 0x12, 0x0e, 0x0a, 0x02, 0x6f, 0x6b, + 0x18, 0x0b, 0x20, 0x01, 0x28, 0x08, 0x52, 0x02, 0x6f, 0x6b, 0x12, 0x14, 0x0a, 0x05, 0x65, 0x72, + 0x72, 0x6f, 0x72, 0x18, 0x0c, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x65, 0x72, 0x72, 0x6f, 0x72, + 0x22, 0xbc, 0x02, 0x0a, 0x14, 0x47, 0x65, 0x74, 0x53, 0x6c, 0x69, 0x63, 0x65, 0x50, 0x72, 0x6f, + 0x6f, 0x66, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x21, 0x0a, 0x0c, 0x63, 0x68, 0x61, + 0x6c, 0x6c, 0x65, 0x6e, 0x67, 0x65, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, + 0x0b, 0x63, 0x68, 0x61, 0x6c, 0x6c, 0x65, 0x6e, 0x67, 0x65, 0x49, 0x64, 0x12, 0x19, 0x0a, 0x08, + 0x65, 0x70, 0x6f, 0x63, 0x68, 0x5f, 0x69, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x04, 0x52, 0x07, + 0x65, 0x70, 0x6f, 0x63, 0x68, 0x49, 0x64, 0x12, 0x12, 0x0a, 0x04, 0x73, 0x65, 0x65, 0x64, 0x18, + 0x03, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x04, 0x73, 0x65, 0x65, 0x64, 0x12, 0x19, 0x0a, 0x08, 0x66, + 0x69, 0x6c, 0x65, 0x5f, 0x6b, 0x65, 0x79, 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, 0x66, + 0x69, 0x6c, 0x65, 0x4b, 0x65, 0x79, 0x12, 0x27, 0x0a, 0x0f, 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, + 0x74, 0x65, 0x64, 0x5f, 0x73, 0x74, 0x61, 0x72, 0x74, 0x18, 0x05, 0x20, 0x01, 0x28, 0x04, 0x52, + 0x0e, 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x65, 0x64, 0x53, 0x74, 0x61, 0x72, 0x74, 0x12, + 0x23, 0x0a, 0x0d, 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x65, 0x64, 0x5f, 0x65, 0x6e, 0x64, + 0x18, 0x06, 0x20, 0x01, 0x28, 0x04, 0x52, 0x0c, 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x65, + 0x64, 0x45, 0x6e, 0x64, 0x12, 0x23, 0x0a, 0x0d, 0x63, 0x68, 0x61, 0x6c, 0x6c, 0x65, 0x6e, 0x67, + 0x65, 0x72, 0x5f, 0x69, 0x64, 0x18, 0x07, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0c, 0x63, 0x68, 0x61, + 0x6c, 0x6c, 0x65, 0x6e, 0x67, 0x65, 0x72, 0x49, 0x64, 0x12, 0x21, 0x0a, 0x0c, 0x72, 0x65, 0x63, + 0x69, 0x70, 0x69, 0x65, 0x6e, 0x74, 0x5f, 0x69, 0x64, 0x18, 0x08, 0x20, 0x01, 0x28, 
0x09, 0x52, + 0x0b, 0x72, 0x65, 0x63, 0x69, 0x70, 0x69, 0x65, 0x6e, 0x74, 0x49, 0x64, 0x12, 0x21, 0x0a, 0x0c, + 0x6f, 0x62, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x5f, 0x69, 0x64, 0x73, 0x18, 0x09, 0x20, 0x03, + 0x28, 0x09, 0x52, 0x0b, 0x6f, 0x62, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x49, 0x64, 0x73, 0x22, + 0x9d, 0x02, 0x0a, 0x15, 0x47, 0x65, 0x74, 0x53, 0x6c, 0x69, 0x63, 0x65, 0x50, 0x72, 0x6f, 0x6f, + 0x66, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x21, 0x0a, 0x0c, 0x63, 0x68, 0x61, + 0x6c, 0x6c, 0x65, 0x6e, 0x67, 0x65, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, + 0x0b, 0x63, 0x68, 0x61, 0x6c, 0x6c, 0x65, 0x6e, 0x67, 0x65, 0x49, 0x64, 0x12, 0x19, 0x0a, 0x08, + 0x65, 0x70, 0x6f, 0x63, 0x68, 0x5f, 0x69, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x04, 0x52, 0x07, + 0x65, 0x70, 0x6f, 0x63, 0x68, 0x49, 0x64, 0x12, 0x19, 0x0a, 0x08, 0x66, 0x69, 0x6c, 0x65, 0x5f, + 0x6b, 0x65, 0x79, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, 0x66, 0x69, 0x6c, 0x65, 0x4b, + 0x65, 0x79, 0x12, 0x14, 0x0a, 0x05, 0x73, 0x74, 0x61, 0x72, 0x74, 0x18, 0x04, 0x20, 0x01, 0x28, + 0x04, 0x52, 0x05, 0x73, 0x74, 0x61, 0x72, 0x74, 0x12, 0x10, 0x0a, 0x03, 0x65, 0x6e, 0x64, 0x18, + 0x05, 0x20, 0x01, 0x28, 0x04, 0x52, 0x03, 0x65, 0x6e, 0x64, 0x12, 0x21, 0x0a, 0x0c, 0x72, 0x65, + 0x63, 0x69, 0x70, 0x69, 0x65, 0x6e, 0x74, 0x5f, 0x69, 0x64, 0x18, 0x06, 0x20, 0x01, 0x28, 0x09, + 0x52, 0x0b, 0x72, 0x65, 0x63, 0x69, 0x70, 0x69, 0x65, 0x6e, 0x74, 0x49, 0x64, 0x12, 0x14, 0x0a, + 0x05, 0x73, 0x6c, 0x69, 0x63, 0x65, 0x18, 0x07, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x05, 0x73, 0x6c, + 0x69, 0x63, 0x65, 0x12, 0x24, 0x0a, 0x0e, 0x70, 0x72, 0x6f, 0x6f, 0x66, 0x5f, 0x68, 0x61, 0x73, + 0x68, 0x5f, 0x68, 0x65, 0x78, 0x18, 0x08, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0c, 0x70, 0x72, 0x6f, + 0x6f, 0x66, 0x48, 0x61, 0x73, 0x68, 0x48, 0x65, 0x78, 0x12, 0x0e, 0x0a, 0x02, 0x6f, 0x6b, 0x18, + 0x09, 0x20, 0x01, 0x28, 0x08, 0x52, 0x02, 0x6f, 0x6b, 0x12, 0x14, 0x0a, 0x05, 0x65, 0x72, 0x72, + 0x6f, 0x72, 0x18, 0x0a, 
0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x65, 0x72, 0x72, 0x6f, 0x72, 0x22, + 0x9e, 0x02, 0x0a, 0x17, 0x56, 0x65, 0x72, 0x69, 0x66, 0x79, 0x53, 0x6c, 0x69, 0x63, 0x65, 0x50, + 0x72, 0x6f, 0x6f, 0x66, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x21, 0x0a, 0x0c, 0x63, + 0x68, 0x61, 0x6c, 0x6c, 0x65, 0x6e, 0x67, 0x65, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, + 0x09, 0x52, 0x0b, 0x63, 0x68, 0x61, 0x6c, 0x6c, 0x65, 0x6e, 0x67, 0x65, 0x49, 0x64, 0x12, 0x19, + 0x0a, 0x08, 0x65, 0x70, 0x6f, 0x63, 0x68, 0x5f, 0x69, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x04, + 0x52, 0x07, 0x65, 0x70, 0x6f, 0x63, 0x68, 0x49, 0x64, 0x12, 0x19, 0x0a, 0x08, 0x66, 0x69, 0x6c, + 0x65, 0x5f, 0x6b, 0x65, 0x79, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, 0x66, 0x69, 0x6c, + 0x65, 0x4b, 0x65, 0x79, 0x12, 0x14, 0x0a, 0x05, 0x73, 0x74, 0x61, 0x72, 0x74, 0x18, 0x04, 0x20, + 0x01, 0x28, 0x04, 0x52, 0x05, 0x73, 0x74, 0x61, 0x72, 0x74, 0x12, 0x10, 0x0a, 0x03, 0x65, 0x6e, + 0x64, 0x18, 0x05, 0x20, 0x01, 0x28, 0x04, 0x52, 0x03, 0x65, 0x6e, 0x64, 0x12, 0x14, 0x0a, 0x05, + 0x73, 0x6c, 0x69, 0x63, 0x65, 0x18, 0x06, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x05, 0x73, 0x6c, 0x69, + 0x63, 0x65, 0x12, 0x24, 0x0a, 0x0e, 0x70, 0x72, 0x6f, 0x6f, 0x66, 0x5f, 0x68, 0x61, 0x73, 0x68, + 0x5f, 0x68, 0x65, 0x78, 0x18, 0x07, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0c, 0x70, 0x72, 0x6f, 0x6f, + 0x66, 0x48, 0x61, 0x73, 0x68, 0x48, 0x65, 0x78, 0x12, 0x23, 0x0a, 0x0d, 0x63, 0x68, 0x61, 0x6c, + 0x6c, 0x65, 0x6e, 0x67, 0x65, 0x72, 0x5f, 0x69, 0x64, 0x18, 0x08, 0x20, 0x01, 0x28, 0x09, 0x52, + 0x0c, 0x63, 0x68, 0x61, 0x6c, 0x6c, 0x65, 0x6e, 0x67, 0x65, 0x72, 0x49, 0x64, 0x12, 0x21, 0x0a, + 0x0c, 0x72, 0x65, 0x63, 0x69, 0x70, 0x69, 0x65, 0x6e, 0x74, 0x5f, 0x69, 0x64, 0x18, 0x09, 0x20, + 0x01, 0x28, 0x09, 0x52, 0x0b, 0x72, 0x65, 0x63, 0x69, 0x70, 0x69, 0x65, 0x6e, 0x74, 0x49, 0x64, + 0x22, 0x9f, 0x01, 0x0a, 0x18, 0x56, 0x65, 0x72, 0x69, 0x66, 0x79, 0x53, 0x6c, 0x69, 0x63, 0x65, + 0x50, 0x72, 0x6f, 0x6f, 0x66, 0x52, 0x65, 0x73, 0x70, 0x6f, 
0x6e, 0x73, 0x65, 0x12, 0x21, 0x0a, + 0x0c, 0x63, 0x68, 0x61, 0x6c, 0x6c, 0x65, 0x6e, 0x67, 0x65, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, + 0x01, 0x28, 0x09, 0x52, 0x0b, 0x63, 0x68, 0x61, 0x6c, 0x6c, 0x65, 0x6e, 0x67, 0x65, 0x49, 0x64, + 0x12, 0x19, 0x0a, 0x08, 0x65, 0x70, 0x6f, 0x63, 0x68, 0x5f, 0x69, 0x64, 0x18, 0x02, 0x20, 0x01, + 0x28, 0x04, 0x52, 0x07, 0x65, 0x70, 0x6f, 0x63, 0x68, 0x49, 0x64, 0x12, 0x1f, 0x0a, 0x0b, 0x6f, + 0x62, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x5f, 0x69, 0x64, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, + 0x52, 0x0a, 0x6f, 0x62, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x49, 0x64, 0x12, 0x0e, 0x0a, 0x02, + 0x6f, 0x6b, 0x18, 0x04, 0x20, 0x01, 0x28, 0x08, 0x52, 0x02, 0x6f, 0x6b, 0x12, 0x14, 0x0a, 0x05, + 0x65, 0x72, 0x72, 0x6f, 0x72, 0x18, 0x05, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x65, 0x72, 0x72, + 0x6f, 0x72, 0x32, 0xad, 0x02, 0x0a, 0x17, 0x53, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x43, 0x68, + 0x61, 0x6c, 0x6c, 0x65, 0x6e, 0x67, 0x65, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x12, 0x54, + 0x0a, 0x0d, 0x47, 0x65, 0x74, 0x53, 0x6c, 0x69, 0x63, 0x65, 0x50, 0x72, 0x6f, 0x6f, 0x66, 0x12, + 0x1f, 0x2e, 0x73, 0x75, 0x70, 0x65, 0x72, 0x6e, 0x6f, 0x64, 0x65, 0x2e, 0x47, 0x65, 0x74, 0x53, + 0x6c, 0x69, 0x63, 0x65, 0x50, 0x72, 0x6f, 0x6f, 0x66, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, + 0x1a, 0x20, 0x2e, 0x73, 0x75, 0x70, 0x65, 0x72, 0x6e, 0x6f, 0x64, 0x65, 0x2e, 0x47, 0x65, 0x74, + 0x53, 0x6c, 0x69, 0x63, 0x65, 0x50, 0x72, 0x6f, 0x6f, 0x66, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, + 0x73, 0x65, 0x22, 0x00, 0x12, 0x5d, 0x0a, 0x10, 0x56, 0x65, 0x72, 0x69, 0x66, 0x79, 0x53, 0x6c, + 0x69, 0x63, 0x65, 0x50, 0x72, 0x6f, 0x6f, 0x66, 0x12, 0x22, 0x2e, 0x73, 0x75, 0x70, 0x65, 0x72, + 0x6e, 0x6f, 0x64, 0x65, 0x2e, 0x56, 0x65, 0x72, 0x69, 0x66, 0x79, 0x53, 0x6c, 0x69, 0x63, 0x65, + 0x50, 0x72, 0x6f, 0x6f, 0x66, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x23, 0x2e, 0x73, + 0x75, 0x70, 0x65, 0x72, 0x6e, 0x6f, 0x64, 0x65, 0x2e, 0x56, 0x65, 0x72, 0x69, 0x66, 0x79, 0x53, + 
0x6c, 0x69, 0x63, 0x65, 0x50, 0x72, 0x6f, 0x6f, 0x66, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, + 0x65, 0x22, 0x00, 0x12, 0x5d, 0x0a, 0x10, 0x47, 0x65, 0x74, 0x43, 0x6f, 0x6d, 0x70, 0x6f, 0x75, + 0x6e, 0x64, 0x50, 0x72, 0x6f, 0x6f, 0x66, 0x12, 0x22, 0x2e, 0x73, 0x75, 0x70, 0x65, 0x72, 0x6e, + 0x6f, 0x64, 0x65, 0x2e, 0x47, 0x65, 0x74, 0x43, 0x6f, 0x6d, 0x70, 0x6f, 0x75, 0x6e, 0x64, 0x50, + 0x72, 0x6f, 0x6f, 0x66, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x23, 0x2e, 0x73, 0x75, + 0x70, 0x65, 0x72, 0x6e, 0x6f, 0x64, 0x65, 0x2e, 0x47, 0x65, 0x74, 0x43, 0x6f, 0x6d, 0x70, 0x6f, + 0x75, 0x6e, 0x64, 0x50, 0x72, 0x6f, 0x6f, 0x66, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, + 0x22, 0x00, 0x42, 0x36, 0x5a, 0x34, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, + 0x2f, 0x4c, 0x75, 0x6d, 0x65, 0x72, 0x61, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x6f, 0x6c, 0x2f, + 0x73, 0x75, 0x70, 0x65, 0x72, 0x6e, 0x6f, 0x64, 0x65, 0x2f, 0x76, 0x32, 0x2f, 0x67, 0x65, 0x6e, + 0x2f, 0x73, 0x75, 0x70, 0x65, 0x72, 0x6e, 0x6f, 0x64, 0x65, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, + 0x6f, 0x33, +} var ( file_supernode_storage_challenge_proto_rawDescOnce sync.Once - file_supernode_storage_challenge_proto_rawDescData []byte + file_supernode_storage_challenge_proto_rawDescData = file_supernode_storage_challenge_proto_rawDesc ) func file_supernode_storage_challenge_proto_rawDescGZIP() []byte { file_supernode_storage_challenge_proto_rawDescOnce.Do(func() { - file_supernode_storage_challenge_proto_rawDescData = protoimpl.X.CompressGZIP(unsafe.Slice(unsafe.StringData(file_supernode_storage_challenge_proto_rawDesc), len(file_supernode_storage_challenge_proto_rawDesc))) + file_supernode_storage_challenge_proto_rawDescData = protoimpl.X.CompressGZIP(file_supernode_storage_challenge_proto_rawDescData) }) return file_supernode_storage_challenge_proto_rawDescData } -var file_supernode_storage_challenge_proto_msgTypes = make([]protoimpl.MessageInfo, 4) +var 
file_supernode_storage_challenge_proto_msgTypes = make([]protoimpl.MessageInfo, 7) var file_supernode_storage_challenge_proto_goTypes = []any{ - (*GetSliceProofRequest)(nil), // 0: supernode.GetSliceProofRequest - (*GetSliceProofResponse)(nil), // 1: supernode.GetSliceProofResponse - (*VerifySliceProofRequest)(nil), // 2: supernode.VerifySliceProofRequest - (*VerifySliceProofResponse)(nil), // 3: supernode.VerifySliceProofResponse + (*ByteRange)(nil), // 0: supernode.ByteRange + (*GetCompoundProofRequest)(nil), // 1: supernode.GetCompoundProofRequest + (*GetCompoundProofResponse)(nil), // 2: supernode.GetCompoundProofResponse + (*GetSliceProofRequest)(nil), // 3: supernode.GetSliceProofRequest + (*GetSliceProofResponse)(nil), // 4: supernode.GetSliceProofResponse + (*VerifySliceProofRequest)(nil), // 5: supernode.VerifySliceProofRequest + (*VerifySliceProofResponse)(nil), // 6: supernode.VerifySliceProofResponse } var file_supernode_storage_challenge_proto_depIdxs = []int32{ - 0, // 0: supernode.StorageChallengeService.GetSliceProof:input_type -> supernode.GetSliceProofRequest - 2, // 1: supernode.StorageChallengeService.VerifySliceProof:input_type -> supernode.VerifySliceProofRequest - 1, // 2: supernode.StorageChallengeService.GetSliceProof:output_type -> supernode.GetSliceProofResponse - 3, // 3: supernode.StorageChallengeService.VerifySliceProof:output_type -> supernode.VerifySliceProofResponse - 2, // [2:4] is the sub-list for method output_type - 0, // [0:2] is the sub-list for method input_type - 0, // [0:0] is the sub-list for extension type_name - 0, // [0:0] is the sub-list for extension extendee - 0, // [0:0] is the sub-list for field type_name + 0, // 0: supernode.GetCompoundProofRequest.ranges:type_name -> supernode.ByteRange + 3, // 1: supernode.StorageChallengeService.GetSliceProof:input_type -> supernode.GetSliceProofRequest + 5, // 2: supernode.StorageChallengeService.VerifySliceProof:input_type -> supernode.VerifySliceProofRequest + 1, // 3: 
supernode.StorageChallengeService.GetCompoundProof:input_type -> supernode.GetCompoundProofRequest + 4, // 4: supernode.StorageChallengeService.GetSliceProof:output_type -> supernode.GetSliceProofResponse + 6, // 5: supernode.StorageChallengeService.VerifySliceProof:output_type -> supernode.VerifySliceProofResponse + 2, // 6: supernode.StorageChallengeService.GetCompoundProof:output_type -> supernode.GetCompoundProofResponse + 4, // [4:7] is the sub-list for method output_type + 1, // [1:4] is the sub-list for method input_type + 1, // [1:1] is the sub-list for extension type_name + 1, // [1:1] is the sub-list for extension extendee + 0, // [0:1] is the sub-list for field type_name } func init() { file_supernode_storage_challenge_proto_init() } @@ -513,13 +987,99 @@ func file_supernode_storage_challenge_proto_init() { if File_supernode_storage_challenge_proto != nil { return } + if !protoimpl.UnsafeEnabled { + file_supernode_storage_challenge_proto_msgTypes[0].Exporter = func(v any, i int) any { + switch v := v.(*ByteRange); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_supernode_storage_challenge_proto_msgTypes[1].Exporter = func(v any, i int) any { + switch v := v.(*GetCompoundProofRequest); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_supernode_storage_challenge_proto_msgTypes[2].Exporter = func(v any, i int) any { + switch v := v.(*GetCompoundProofResponse); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_supernode_storage_challenge_proto_msgTypes[3].Exporter = func(v any, i int) any { + switch v := v.(*GetSliceProofRequest); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + 
file_supernode_storage_challenge_proto_msgTypes[4].Exporter = func(v any, i int) any { + switch v := v.(*GetSliceProofResponse); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_supernode_storage_challenge_proto_msgTypes[5].Exporter = func(v any, i int) any { + switch v := v.(*VerifySliceProofRequest); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_supernode_storage_challenge_proto_msgTypes[6].Exporter = func(v any, i int) any { + switch v := v.(*VerifySliceProofResponse); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + } type x struct{} out := protoimpl.TypeBuilder{ File: protoimpl.DescBuilder{ GoPackagePath: reflect.TypeOf(x{}).PkgPath(), - RawDescriptor: unsafe.Slice(unsafe.StringData(file_supernode_storage_challenge_proto_rawDesc), len(file_supernode_storage_challenge_proto_rawDesc)), + RawDescriptor: file_supernode_storage_challenge_proto_rawDesc, NumEnums: 0, - NumMessages: 4, + NumMessages: 7, NumExtensions: 0, NumServices: 1, }, @@ -528,6 +1088,7 @@ func file_supernode_storage_challenge_proto_init() { MessageInfos: file_supernode_storage_challenge_proto_msgTypes, }.Build() File_supernode_storage_challenge_proto = out.File + file_supernode_storage_challenge_proto_rawDesc = nil file_supernode_storage_challenge_proto_goTypes = nil file_supernode_storage_challenge_proto_depIdxs = nil } diff --git a/gen/supernode/storage_challenge_grpc.pb.go b/gen/supernode/storage_challenge_grpc.pb.go index 0844b73d..1b67c0e1 100644 --- a/gen/supernode/storage_challenge_grpc.pb.go +++ b/gen/supernode/storage_challenge_grpc.pb.go @@ -1,7 +1,7 @@ // Code generated by protoc-gen-go-grpc. DO NOT EDIT. 
// versions: // - protoc-gen-go-grpc v1.5.1 -// - protoc v3.21.12 +// - protoc v4.25.1 // source: supernode/storage_challenge.proto package supernode @@ -21,6 +21,7 @@ const _ = grpc.SupportPackageIsVersion9 const ( StorageChallengeService_GetSliceProof_FullMethodName = "/supernode.StorageChallengeService/GetSliceProof" StorageChallengeService_VerifySliceProof_FullMethodName = "/supernode.StorageChallengeService/VerifySliceProof" + StorageChallengeService_GetCompoundProof_FullMethodName = "/supernode.StorageChallengeService/GetCompoundProof" ) // StorageChallengeServiceClient is the client API for StorageChallengeService service. @@ -31,6 +32,7 @@ const ( type StorageChallengeServiceClient interface { GetSliceProof(ctx context.Context, in *GetSliceProofRequest, opts ...grpc.CallOption) (*GetSliceProofResponse, error) VerifySliceProof(ctx context.Context, in *VerifySliceProofRequest, opts ...grpc.CallOption) (*VerifySliceProofResponse, error) + GetCompoundProof(ctx context.Context, in *GetCompoundProofRequest, opts ...grpc.CallOption) (*GetCompoundProofResponse, error) } type storageChallengeServiceClient struct { @@ -61,6 +63,16 @@ func (c *storageChallengeServiceClient) VerifySliceProof(ctx context.Context, in return out, nil } +func (c *storageChallengeServiceClient) GetCompoundProof(ctx context.Context, in *GetCompoundProofRequest, opts ...grpc.CallOption) (*GetCompoundProofResponse, error) { + cOpts := append([]grpc.CallOption{grpc.StaticMethod()}, opts...) + out := new(GetCompoundProofResponse) + err := c.cc.Invoke(ctx, StorageChallengeService_GetCompoundProof_FullMethodName, in, out, cOpts...) + if err != nil { + return nil, err + } + return out, nil +} + // StorageChallengeServiceServer is the server API for StorageChallengeService service. // All implementations must embed UnimplementedStorageChallengeServiceServer // for forward compatibility. 
@@ -69,6 +81,7 @@ func (c *storageChallengeServiceClient) VerifySliceProof(ctx context.Context, in type StorageChallengeServiceServer interface { GetSliceProof(context.Context, *GetSliceProofRequest) (*GetSliceProofResponse, error) VerifySliceProof(context.Context, *VerifySliceProofRequest) (*VerifySliceProofResponse, error) + GetCompoundProof(context.Context, *GetCompoundProofRequest) (*GetCompoundProofResponse, error) mustEmbedUnimplementedStorageChallengeServiceServer() } @@ -85,6 +98,9 @@ func (UnimplementedStorageChallengeServiceServer) GetSliceProof(context.Context, func (UnimplementedStorageChallengeServiceServer) VerifySliceProof(context.Context, *VerifySliceProofRequest) (*VerifySliceProofResponse, error) { return nil, status.Errorf(codes.Unimplemented, "method VerifySliceProof not implemented") } +func (UnimplementedStorageChallengeServiceServer) GetCompoundProof(context.Context, *GetCompoundProofRequest) (*GetCompoundProofResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method GetCompoundProof not implemented") +} func (UnimplementedStorageChallengeServiceServer) mustEmbedUnimplementedStorageChallengeServiceServer() { } func (UnimplementedStorageChallengeServiceServer) testEmbeddedByValue() {} @@ -143,6 +159,24 @@ func _StorageChallengeService_VerifySliceProof_Handler(srv interface{}, ctx cont return interceptor(ctx, in, info, handler) } +func _StorageChallengeService_GetCompoundProof_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(GetCompoundProofRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(StorageChallengeServiceServer).GetCompoundProof(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: StorageChallengeService_GetCompoundProof_FullMethodName, + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return 
srv.(StorageChallengeServiceServer).GetCompoundProof(ctx, req.(*GetCompoundProofRequest)) + } + return interceptor(ctx, in, info, handler) +} + // StorageChallengeService_ServiceDesc is the grpc.ServiceDesc for StorageChallengeService service. // It's only intended for direct use with grpc.RegisterService, // and not to be introspected or modified (even as a copy) @@ -158,6 +192,10 @@ var StorageChallengeService_ServiceDesc = grpc.ServiceDesc{ MethodName: "VerifySliceProof", Handler: _StorageChallengeService_VerifySliceProof_Handler, }, + { + MethodName: "GetCompoundProof", + Handler: _StorageChallengeService_GetCompoundProof_Handler, + }, }, Streams: []grpc.StreamDesc{}, Metadata: "supernode/storage_challenge.proto", diff --git a/pkg/lumera/modules/action/action_mock.go b/pkg/lumera/modules/action/action_mock.go index a4524fa3..e993fe40 100644 --- a/pkg/lumera/modules/action/action_mock.go +++ b/pkg/lumera/modules/action/action_mock.go @@ -85,3 +85,18 @@ func (mr *MockModuleMockRecorder) GetParams(ctx any) *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetParams", reflect.TypeOf((*MockModule)(nil).GetParams), ctx) } + +// ListActionsBySuperNode mocks base method. +func (m *MockModule) ListActionsBySuperNode(ctx context.Context, superNodeAddress string) (*types.QueryListActionsBySuperNodeResponse, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "ListActionsBySuperNode", ctx, superNodeAddress) + ret0, _ := ret[0].(*types.QueryListActionsBySuperNodeResponse) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// ListActionsBySuperNode indicates an expected call of ListActionsBySuperNode. 
+func (mr *MockModuleMockRecorder) ListActionsBySuperNode(ctx, superNodeAddress any) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ListActionsBySuperNode", reflect.TypeOf((*MockModule)(nil).ListActionsBySuperNode), ctx, superNodeAddress) +} diff --git a/pkg/lumera/modules/action/impl.go b/pkg/lumera/modules/action/impl.go index ccf4dfea..6295e4f6 100644 --- a/pkg/lumera/modules/action/impl.go +++ b/pkg/lumera/modules/action/impl.go @@ -5,6 +5,7 @@ import ( "fmt" "github.com/LumeraProtocol/lumera/x/action/v1/types" + "github.com/cosmos/cosmos-sdk/types/query" "google.golang.org/grpc" ) @@ -57,3 +58,30 @@ func (m *module) GetParams(ctx context.Context) (*types.QueryParamsResponse, err return resp, nil } + +// ListActionsBySuperNode lists actions assigned to a specific supernode. +func (m *module) ListActionsBySuperNode(ctx context.Context, superNodeAddress string) (*types.QueryListActionsBySuperNodeResponse, error) { + var all []*types.Action + var nextKey []byte + for { + resp, err := m.client.ListActionsBySuperNode(ctx, &types.QueryListActionsBySuperNodeRequest{ + SuperNodeAddress: superNodeAddress, + Pagination: &query.PageRequest{ + Key: nextKey, + Limit: 100, + }, + }) + if err != nil { + return nil, err + } + if resp == nil { + return &types.QueryListActionsBySuperNodeResponse{Actions: all}, nil + } + all = append(all, resp.Actions...) 
+ if resp.Pagination == nil || len(resp.Pagination.NextKey) == 0 { + resp.Actions = all + return resp, nil + } + nextKey = resp.Pagination.NextKey + } +} diff --git a/pkg/lumera/modules/action/interface.go b/pkg/lumera/modules/action/interface.go index 585c7bf8..2a6d7123 100644 --- a/pkg/lumera/modules/action/interface.go +++ b/pkg/lumera/modules/action/interface.go @@ -13,6 +13,7 @@ type Module interface { GetAction(ctx context.Context, actionID string) (*types.QueryGetActionResponse, error) GetActionFee(ctx context.Context, dataSize string) (*types.QueryGetActionFeeResponse, error) GetParams(ctx context.Context) (*types.QueryParamsResponse, error) + ListActionsBySuperNode(ctx context.Context, superNodeAddress string) (*types.QueryListActionsBySuperNodeResponse, error) } // NewModule creates a new Action module client diff --git a/pkg/storagechallenge/deterministic/lep6_test.go b/pkg/storagechallenge/deterministic/lep6_test.go index 36142609..f908eaf4 100644 --- a/pkg/storagechallenge/deterministic/lep6_test.go +++ b/pkg/storagechallenge/deterministic/lep6_test.go @@ -665,3 +665,74 @@ func TestSortStrings_StableForPairs(t *testing.T) { t.Fatalf("stable sort mismatch: %v != %v", xs, want) } } + +// TestChainDefaults_BoundToSupernodeConstants is a chain-binding cross-validation +// guard: the supernode's deterministic primitives are a parallel implementation +// of chain logic, and any drift between supernode constants and chain defaults +// breaks consensus equivalence silently. This test imports the chain types +// package (already in go.mod via PR1) and asserts the supernode's hardcoded +// constants and the values consumed by SelectLEP6Targets/ClassifyTicketBucket +// match chain DefaultParams() byte-for-byte. +// +// Why this test belongs here (not in PR6 e2e): chain defaults are pure values +// reachable without an sdk.Context. A unit-level binding catches drift the +// instant chain bumps a default, before any integration env is even spun up. 
+// +// If chain ever renames or removes one of these symbols, this test will fail +// to compile — which is the desired loud-failure mode. +func TestChainDefaults_BoundToSupernodeConstants(t *testing.T) { + chain := audittypes.DefaultParams().WithDefaults() + + // 1. Challenge target divisor — drives SelectLEP6Targets count. + if got, want := uint32(LEP6ChallengeTargetDivisor), audittypes.DefaultStorageTruthChallengeTargetDivisor; got != want { + t.Fatalf("LEP6ChallengeTargetDivisor drift: supernode=%d chain=%d", got, want) + } + if got, want := chain.StorageTruthChallengeTargetDivisor, audittypes.DefaultStorageTruthChallengeTargetDivisor; got != want { + t.Fatalf("DefaultParams().StorageTruthChallengeTargetDivisor drift: %d vs %d", got, want) + } + + // 2. Recent-bucket window — drives ClassifyTicketBucket RECENT boundary. + if chain.StorageTruthRecentBucketMaxBlocks != audittypes.DefaultStorageTruthRecentBucketMaxBlocks { + t.Fatalf("DefaultStorageTruthRecentBucketMaxBlocks drift: params=%d const=%d", + chain.StorageTruthRecentBucketMaxBlocks, audittypes.DefaultStorageTruthRecentBucketMaxBlocks) + } + + // 3. Old-bucket window — drives ClassifyTicketBucket OLD boundary. + if chain.StorageTruthOldBucketMinBlocks != audittypes.DefaultStorageTruthOldBucketMinBlocks { + t.Fatalf("DefaultStorageTruthOldBucketMinBlocks drift: params=%d const=%d", + chain.StorageTruthOldBucketMinBlocks, audittypes.DefaultStorageTruthOldBucketMinBlocks) + } + + // 4. Old > Recent invariant — chain's bucket classification depends on + // OLD floor being strictly greater than RECENT ceiling. If governance + // ever flips this, supernode's ClassifyTicketBucket would silently + // misclassify all in-between heights. + if chain.StorageTruthOldBucketMinBlocks <= chain.StorageTruthRecentBucketMaxBlocks { + t.Fatalf("OLD floor must exceed RECENT ceiling: old=%d recent=%d", + chain.StorageTruthOldBucketMinBlocks, chain.StorageTruthRecentBucketMaxBlocks) + } + + // 5. 
End-to-end: drive SelectLEP6Targets with chain-sourced divisor on the + // chain test's exact fixture and confirm the same 2-target outcome the + // chain test asserts. Locks supernode→chain agreement to chain's own + // test vector, not just a self-generated one. + active := []string{"sn-a", "sn-b", "sn-c", "sn-d", "sn-e", "sn-f"} + tgt := SelectLEP6Targets(active, chainSeed, chain.StorageTruthChallengeTargetDivisor) + if len(tgt) != 2 { + t.Fatalf("chain-defaults end-to-end: want 2 targets per chain test, got %d (%v)", len(tgt), tgt) + } + + // 6. ClassifyTicketBucket sanity at chain-default boundaries: an action + // anchored exactly RECENT_MAX blocks behind current is RECENT; anchored + // OLD_MIN behind is OLD. Crosses both windows; locks bucket logic to + // chain defaults. + const currentH int64 = 1_000_000 + recentAnchor := currentH - int64(chain.StorageTruthRecentBucketMaxBlocks) // RECENT inclusive at boundary + oldAnchor := currentH - int64(chain.StorageTruthOldBucketMinBlocks) // OLD inclusive at boundary + if got := ClassifyTicketBucket(currentH, recentAnchor, chain.StorageTruthRecentBucketMaxBlocks, chain.StorageTruthOldBucketMinBlocks); got != audittypes.StorageProofBucketType_STORAGE_PROOF_BUCKET_TYPE_RECENT { + t.Fatalf("RECENT boundary classification drift: got %v at delta=%d", got, currentH-recentAnchor) + } + if got := ClassifyTicketBucket(currentH, oldAnchor, chain.StorageTruthRecentBucketMaxBlocks, chain.StorageTruthOldBucketMinBlocks); got != audittypes.StorageProofBucketType_STORAGE_PROOF_BUCKET_TYPE_OLD { + t.Fatalf("OLD boundary classification drift: got %v at delta=%d", got, currentH-oldAnchor) + } +} diff --git a/pkg/storagechallenge/lep6_resolution.go b/pkg/storagechallenge/lep6_resolution.go new file mode 100644 index 00000000..37e6cf0f --- /dev/null +++ b/pkg/storagechallenge/lep6_resolution.go @@ -0,0 +1,159 @@ +// Package storagechallenge contains the supernode-side off-chain glue for the +// LEP-6 compound storage challenge 
runtime. The deterministic primitives that +// must agree byte-for-byte across reporters live in +// pkg/storagechallenge/deterministic; this file exposes the integration helpers +// that depend on cascade metadata and chain-side caps. +package storagechallenge + +import ( + "errors" + "fmt" + "math" + + actiontypes "github.com/LumeraProtocol/lumera/x/action/v1/types" + audittypes "github.com/LumeraProtocol/lumera/x/audit/v1/types" + "github.com/LumeraProtocol/supernode/v2/pkg/cascadekit" +) + +// MaxStorageProofResultsPerReport mirrors the chain-side cap that the audit +// keeper enforces in DeliverTx for MsgSubmitEpochReport: an epoch report +// carrying more than this many StorageProofResults is rejected wholesale. +// +// Source of truth: lumera/x/audit/v1/types/keys.go (lines 11-13) at the +// pinned chain commit. The supernode result buffer must self-throttle to this +// cap before handing results to the host reporter — see +// supernode/storage_challenge/result_buffer.go. +const MaxStorageProofResultsPerReport = 16 + +// ErrUnspecifiedArtifactClass is returned when a caller passes the zero/UNSPECIFIED +// StorageProofArtifactClass to a resolver that requires a concrete class. +var ErrUnspecifiedArtifactClass = errors.New("storagechallenge: artifact class is UNSPECIFIED") + +// ResolveArtifactCount returns the canonical artifact count for (meta, class) +// using only the cascade metadata that finalization committed on-chain. It +// replaces a chain GetTicketArtifactCount RPC that does not exist (LEP-6 v2 +// plan §9, Resolved Decision 8). +// +// Semantics: +// - INDEX -> uint32(meta.RqIdsIc) +// - SYMBOL -> uint32(len(meta.RqIdsIds)) +// - UNSPECIFIED -> error +// +// If both counts are zero (legacy / malformed ticket), this returns (0, nil) +// because the chain accepts that case via its TicketArtifactCountState fallback +// path (x/audit/v1/keeper/msg_submit_epoch_report_storage_proofs.go). Callers +// decide whether to skip such a ticket. 
+func ResolveArtifactCount(meta *actiontypes.CascadeMetadata, class audittypes.StorageProofArtifactClass) (uint32, error) { + if meta == nil { + return 0, errors.New("storagechallenge: nil cascade metadata") + } + switch class { + case audittypes.StorageProofArtifactClass_STORAGE_PROOF_ARTIFACT_CLASS_INDEX: + return uint32(meta.RqIdsIc), nil + case audittypes.StorageProofArtifactClass_STORAGE_PROOF_ARTIFACT_CLASS_SYMBOL: + return uint32(len(meta.RqIdsIds)), nil + case audittypes.StorageProofArtifactClass_STORAGE_PROOF_ARTIFACT_CLASS_UNSPECIFIED: + return 0, ErrUnspecifiedArtifactClass + default: + return 0, fmt.Errorf("storagechallenge: unknown artifact class %v", class) + } +} + +// ResolveArtifactKey returns the deterministic artifact key (content-addressed +// identifier) for (meta, class, ordinal). +// +// - SYMBOL: meta.RqIdsIds[ordinal] (bounds-checked). +// - INDEX: derived via cascadekit.GenerateIndexIDs(meta.Signatures, RqIdsIc, +// RqIdsMax) — the same derivation the supernode cascade module uses to +// materialise INDEX files (supernode/cascade/helper.go, +// supernode/cascade/reseed.go). Per LEP-6 v2 plan §9 Resolved Decision 2. +// +// Returns an error on UNSPECIFIED class, ordinal out of range, or empty +// metadata for the requested class. 
+func ResolveArtifactKey(meta *actiontypes.CascadeMetadata, class audittypes.StorageProofArtifactClass, ordinal uint32) (string, error) { + if meta == nil { + return "", errors.New("storagechallenge: nil cascade metadata") + } + switch class { + case audittypes.StorageProofArtifactClass_STORAGE_PROOF_ARTIFACT_CLASS_SYMBOL: + if int(ordinal) >= len(meta.RqIdsIds) { + return "", fmt.Errorf("storagechallenge: SYMBOL ordinal %d out of range (have %d ids)", ordinal, len(meta.RqIdsIds)) + } + return meta.RqIdsIds[ordinal], nil + case audittypes.StorageProofArtifactClass_STORAGE_PROOF_ARTIFACT_CLASS_INDEX: + if meta.Signatures == "" { + return "", errors.New("storagechallenge: INDEX key requested but cascade metadata has empty signatures") + } + if meta.RqIdsMax == 0 { + return "", errors.New("storagechallenge: INDEX key requested but RqIdsMax is zero") + } + ids, err := cascadekit.GenerateIndexIDs(meta.Signatures, uint32(meta.RqIdsIc), uint32(meta.RqIdsMax)) + if err != nil { + return "", fmt.Errorf("storagechallenge: derive INDEX ids: %w", err) + } + if int(ordinal) >= len(ids) { + return "", fmt.Errorf("storagechallenge: INDEX ordinal %d out of range (derived %d ids)", ordinal, len(ids)) + } + return ids[ordinal], nil + case audittypes.StorageProofArtifactClass_STORAGE_PROOF_ARTIFACT_CLASS_UNSPECIFIED: + return "", ErrUnspecifiedArtifactClass + default: + return "", fmt.Errorf("storagechallenge: unknown artifact class %v", class) + } +} + +// ResolveArtifactSize returns the exact byte size used to derive LEP-6 +// multi-range offsets for a selected artifact. +// +// SYMBOL artifacts are RaptorQ symbols. The exact symbol size is derived from +// the finalized Action.FileSizeKbs and meta.RqIdsMax: +// +// symbolSize = ceil(fileSizeKbs*1024 / meta.RqIdsMax) +// +// INDEX artifacts are generated deterministically from meta.Signatures, +// meta.RqIdsIc, and meta.RqIdsMax; their exact compressed byte length is the +// length of the selected generated index file. 
+func ResolveArtifactSize(act *actiontypes.Action, meta *actiontypes.CascadeMetadata, class audittypes.StorageProofArtifactClass, ordinal uint32) (uint64, error) { + if act == nil { + return 0, errors.New("storagechallenge: nil action") + } + if meta == nil { + return 0, errors.New("storagechallenge: nil cascade metadata") + } + switch class { + case audittypes.StorageProofArtifactClass_STORAGE_PROOF_ARTIFACT_CLASS_SYMBOL: + if act.FileSizeKbs <= 0 { + return 0, fmt.Errorf("storagechallenge: action fileSizeKbs must be > 0 for SYMBOL artifact size (got %d)", act.FileSizeKbs) + } + if meta.RqIdsMax <= 0 { + return 0, errors.New("storagechallenge: RqIdsMax must be > 0 for SYMBOL artifact size") + } + if int(ordinal) >= len(meta.RqIdsIds) { + return 0, fmt.Errorf("storagechallenge: SYMBOL ordinal %d out of range (have %d ids)", ordinal, len(meta.RqIdsIds)) + } + fileBytes := uint64(act.FileSizeKbs) * 1024 + if fileBytes > math.MaxUint64-uint64(meta.RqIdsMax)+1 { + return 0, errors.New("storagechallenge: SYMBOL artifact size overflow") + } + return (fileBytes + uint64(meta.RqIdsMax) - 1) / uint64(meta.RqIdsMax), nil + case audittypes.StorageProofArtifactClass_STORAGE_PROOF_ARTIFACT_CLASS_INDEX: + if meta.Signatures == "" { + return 0, errors.New("storagechallenge: INDEX size requested but cascade metadata has empty signatures") + } + if meta.RqIdsMax == 0 { + return 0, errors.New("storagechallenge: INDEX size requested but RqIdsMax is zero") + } + _, files, err := cascadekit.GenerateIndexFiles(meta.Signatures, uint32(meta.RqIdsIc), uint32(meta.RqIdsMax)) + if err != nil { + return 0, fmt.Errorf("storagechallenge: derive INDEX files: %w", err) + } + if int(ordinal) >= len(files) { + return 0, fmt.Errorf("storagechallenge: INDEX ordinal %d out of range (derived %d files)", ordinal, len(files)) + } + return uint64(len(files[ordinal])), nil + case audittypes.StorageProofArtifactClass_STORAGE_PROOF_ARTIFACT_CLASS_UNSPECIFIED: + return 0, ErrUnspecifiedArtifactClass + 
default: + return 0, fmt.Errorf("storagechallenge: unknown artifact class %v", class) + } +} diff --git a/pkg/storagechallenge/lep6_resolution_test.go b/pkg/storagechallenge/lep6_resolution_test.go new file mode 100644 index 00000000..19ee7f38 --- /dev/null +++ b/pkg/storagechallenge/lep6_resolution_test.go @@ -0,0 +1,124 @@ +package storagechallenge + +import ( + "strings" + "testing" + + actiontypes "github.com/LumeraProtocol/lumera/x/action/v1/types" + audittypes "github.com/LumeraProtocol/lumera/x/audit/v1/types" +) + +func TestResolveArtifactCount_Index_Symbol_Unspecified(t *testing.T) { + meta := &actiontypes.CascadeMetadata{ + RqIdsIc: 7, + RqIdsMax: 12, + RqIdsIds: []string{"a", "b", "c", "d"}, + } + + gotIdx, err := ResolveArtifactCount(meta, audittypes.StorageProofArtifactClass_STORAGE_PROOF_ARTIFACT_CLASS_INDEX) + if err != nil { + t.Fatalf("INDEX: unexpected error: %v", err) + } + if gotIdx != 7 { + t.Fatalf("INDEX count: want 7, got %d", gotIdx) + } + + gotSym, err := ResolveArtifactCount(meta, audittypes.StorageProofArtifactClass_STORAGE_PROOF_ARTIFACT_CLASS_SYMBOL) + if err != nil { + t.Fatalf("SYMBOL: unexpected error: %v", err) + } + if gotSym != 4 { + t.Fatalf("SYMBOL count: want 4, got %d", gotSym) + } + + if _, err := ResolveArtifactCount(meta, audittypes.StorageProofArtifactClass_STORAGE_PROOF_ARTIFACT_CLASS_UNSPECIFIED); err == nil { + t.Fatalf("UNSPECIFIED: expected error, got nil") + } +} + +func TestResolveArtifactCount_LegacyZero(t *testing.T) { + meta := &actiontypes.CascadeMetadata{} // both INDEX (RqIdsIc) and SYMBOL (len(RqIdsIds)) are zero + for _, class := range []audittypes.StorageProofArtifactClass{ + audittypes.StorageProofArtifactClass_STORAGE_PROOF_ARTIFACT_CLASS_INDEX, + audittypes.StorageProofArtifactClass_STORAGE_PROOF_ARTIFACT_CLASS_SYMBOL, + } { + got, err := ResolveArtifactCount(meta, class) + if err != nil { + t.Fatalf("class=%v: legacy zero should not error, got: %v", class, err) + } + if got != 0 { + t.Fatalf("class=%v: 
want 0, got %d", class, got) + } + } +} + +func TestResolveArtifactKey_Symbol_OutOfRange(t *testing.T) { + meta := &actiontypes.CascadeMetadata{RqIdsIds: []string{"id-0", "id-1"}} + + got, err := ResolveArtifactKey(meta, audittypes.StorageProofArtifactClass_STORAGE_PROOF_ARTIFACT_CLASS_SYMBOL, 1) + if err != nil { + t.Fatalf("in-range SYMBOL: unexpected error: %v", err) + } + if got != "id-1" { + t.Fatalf("SYMBOL[1]: want id-1, got %q", got) + } + + if _, err := ResolveArtifactKey(meta, audittypes.StorageProofArtifactClass_STORAGE_PROOF_ARTIFACT_CLASS_SYMBOL, 2); err == nil { + t.Fatalf("SYMBOL[2]: expected out-of-range error, got nil") + } else if !strings.Contains(err.Error(), "out of range") { + t.Fatalf("SYMBOL[2]: error should mention out of range, got: %v", err) + } +} + +func TestResolveArtifactKey_Index_KnownVector(t *testing.T) { + meta := &actiontypes.CascadeMetadata{Signatures: "index-signature-format", RqIdsIc: 2, RqIdsMax: 5} + got0, err := ResolveArtifactKey(meta, audittypes.StorageProofArtifactClass_STORAGE_PROOF_ARTIFACT_CLASS_INDEX, 0) + if err != nil { + t.Fatalf("INDEX[0]: unexpected error: %v", err) + } + got1, err := ResolveArtifactKey(meta, audittypes.StorageProofArtifactClass_STORAGE_PROOF_ARTIFACT_CLASS_INDEX, 1) + if err != nil { + t.Fatalf("INDEX[1]: unexpected error: %v", err) + } + if got0 == "" || got1 == "" || got0 == got1 { + t.Fatalf("expected distinct non-empty index ids, got %q and %q", got0, got1) + } + if _, err := ResolveArtifactKey(meta, audittypes.StorageProofArtifactClass_STORAGE_PROOF_ARTIFACT_CLASS_INDEX, 99); err == nil { + t.Fatalf("INDEX[99]: expected out-of-range error, got nil") + } +} + +func TestResolveArtifactSize_SymbolUsesCeilFileBytesOverRqMax(t *testing.T) { + act := &actiontypes.Action{FileSizeKbs: 10} + meta := &actiontypes.CascadeMetadata{RqIdsMax: 3, RqIdsIds: []string{"s0", "s1", "s2"}} + got, err := ResolveArtifactSize(act, meta, audittypes.StorageProofArtifactClass_STORAGE_PROOF_ARTIFACT_CLASS_SYMBOL, 2) 
+ if err != nil { + t.Fatalf("SYMBOL size: unexpected error: %v", err) + } + // ceil(10*1024 / 3) = 3414. + if got != 3414 { + t.Fatalf("SYMBOL size: want 3414, got %d", got) + } +} + +func TestResolveArtifactSize_IndexUsesGeneratedFileLength(t *testing.T) { + act := &actiontypes.Action{FileSizeKbs: 10} + meta := &actiontypes.CascadeMetadata{Signatures: "index-signature-format", RqIdsIc: 2, RqIdsMax: 5} + got, err := ResolveArtifactSize(act, meta, audittypes.StorageProofArtifactClass_STORAGE_PROOF_ARTIFACT_CLASS_INDEX, 1) + if err != nil { + t.Fatalf("INDEX size: unexpected error: %v", err) + } + if got == 0 { + t.Fatalf("INDEX size: expected non-zero generated file length") + } + if _, err := ResolveArtifactSize(act, meta, audittypes.StorageProofArtifactClass_STORAGE_PROOF_ARTIFACT_CLASS_INDEX, 99); err == nil { + t.Fatalf("INDEX[99]: expected out-of-range error, got nil") + } +} + +func TestResolveArtifactKey_Unspecified(t *testing.T) { + meta := &actiontypes.CascadeMetadata{} + if _, err := ResolveArtifactKey(meta, audittypes.StorageProofArtifactClass_STORAGE_PROOF_ARTIFACT_CLASS_UNSPECIFIED, 0); err == nil { + t.Fatalf("UNSPECIFIED: expected error, got nil") + } +} diff --git a/pkg/testutil/lumera.go b/pkg/testutil/lumera.go index 56096797..1b35e0f1 100644 --- a/pkg/testutil/lumera.go +++ b/pkg/testutil/lumera.go @@ -171,6 +171,10 @@ func (m *MockActionModule) GetParams(ctx context.Context) (*actiontypes.QueryPar return &actiontypes.QueryParamsResponse{}, nil } +func (m *MockActionModule) ListActionsBySuperNode(ctx context.Context, superNodeAddress string) (*actiontypes.QueryListActionsBySuperNodeResponse, error) { + return &actiontypes.QueryListActionsBySuperNodeResponse{}, nil +} + // MockActionMsgModule implements the action_msg.Module interface for testing type MockActionMsgModule struct{} diff --git a/proto/supernode/storage_challenge.proto b/proto/supernode/storage_challenge.proto index 6494787c..60b0c7ac 100644 --- 
a/proto/supernode/storage_challenge.proto +++ b/proto/supernode/storage_challenge.proto @@ -6,6 +6,45 @@ option go_package = "github.com/LumeraProtocol/supernode/v2/gen/supernode"; service StorageChallengeService { rpc GetSliceProof(GetSliceProofRequest) returns (GetSliceProofResponse) {} rpc VerifySliceProof(VerifySliceProofRequest) returns (VerifySliceProofResponse) {} + rpc GetCompoundProof(GetCompoundProofRequest) returns (GetCompoundProofResponse) {} +} + +// ByteRange represents a half-open byte range [start, end) into an artifact. +message ByteRange { + uint64 start = 1; + uint64 end = 2; // exclusive +} + +message GetCompoundProofRequest { + string challenge_id = 1; + uint64 epoch_id = 2; + bytes seed = 3; + string ticket_id = 4; + string target_supernode_account = 5; + string challenger_account = 6; + repeated string observer_accounts = 7; + uint32 artifact_class = 8; // mirrors audittypes.StorageProofArtifactClass + uint32 artifact_ordinal = 9; + uint32 artifact_count = 10; + uint32 bucket_type = 11; // mirrors audittypes.StorageProofBucketType + string artifact_key = 12; + uint64 artifact_size = 13; + repeated ByteRange ranges = 14; // exactly LEP6CompoundRangesPerArtifact (=4); each size LEP6CompoundRangeLenBytes (=256) +} + +message GetCompoundProofResponse { + string challenge_id = 1; + uint64 epoch_id = 2; + string ticket_id = 3; + uint32 artifact_class = 4; + uint32 artifact_ordinal = 5; + uint32 bucket_type = 6; + string artifact_key = 7; + repeated bytes range_bytes = 8; // i-th matches i-th request range + string proof_hash_hex = 9; // BLAKE3(concat(range_bytes...)) lowercase hex + string recipient_signature = 10; // recipient's keyring signature + bool ok = 11; + string error = 12; } message GetSliceProofRequest { diff --git a/supernode/cmd/lep6_adapters.go b/supernode/cmd/lep6_adapters.go new file mode 100644 index 00000000..19e8984b --- /dev/null +++ b/supernode/cmd/lep6_adapters.go @@ -0,0 +1,77 @@ +package cmd + +import ( + "context" + "fmt" 
+
+	actiontypes "github.com/LumeraProtocol/lumera/x/action/v1/types"
+	audittypes "github.com/LumeraProtocol/lumera/x/audit/v1/types"
+	"github.com/LumeraProtocol/supernode/v2/p2p"
+	"github.com/LumeraProtocol/supernode/v2/pkg/cascadekit"
+	"github.com/LumeraProtocol/supernode/v2/pkg/lumera"
+)
+
+// p2pArtifactReader is the recipient-side adapter that satisfies the
+// transport/grpc/storage_challenge ArtifactReader interface by retrieving
+// the full artifact bytes from the local p2p store and slicing the
+// requested range. The PR3 path is correct-but-not-optimal: a future
+// iteration can replace this with a range-scoped reader.
+type p2pArtifactReader struct {
+	p2p p2p.P2P // local p2p store used to fetch artifact bytes by key
+}
+
+// newP2PArtifactReader wraps the given p2p service; a nil service is
+// tolerated here and rejected at read time.
+func newP2PArtifactReader(p p2p.P2P) *p2pArtifactReader {
+	return &p2pArtifactReader{p2p: p}
+}
+
+// ReadArtifactRange returns bytes [start, end) for the given key. class is
+// currently informational; storage is content-addressed by key alone.
+// Errors: nil receiver/p2p service, empty or inverted range, retrieval
+// failure, or a range extending past the retrieved artifact.
+func (r *p2pArtifactReader) ReadArtifactRange(ctx context.Context, _ audittypes.StorageProofArtifactClass, key string, start, end uint64) ([]byte, error) {
+	if r == nil || r.p2p == nil {
+		return nil, fmt.Errorf("p2pArtifactReader: nil p2p service")
+	}
+	// Half-open range: end must be strictly greater than start.
+	if end <= start {
+		return nil, fmt.Errorf("p2pArtifactReader: invalid range [%d,%d)", start, end)
+	}
+	// NOTE(review): the boolean flag's meaning is defined by p2p.P2P.Retrieve
+	// — confirm it requests local-only retrieval as the recipient path intends.
+	data, err := r.p2p.Retrieve(ctx, key, true)
+	if err != nil {
+		return nil, err
+	}
+	// end <= len(data) also implies start is in range, since start < end.
+	if uint64(len(data)) < end {
+		return nil, fmt.Errorf("p2pArtifactReader: range [%d,%d) out of bounds (size=%d)", start, end, len(data))
+	}
+	// Copy the slice out so the returned bytes do not alias the retrieved
+	// buffer.
+	out := make([]byte, end-start)
+	copy(out, data[start:end])
+	return out, nil
+}
+
+// cascadeMetaProvider implements storage_challenge.CascadeMetaProvider via
+// the lumera Action module. It fetches the on-chain action, decodes its
+// CascadeMetadata, and returns it alongside the finalized action FileSizeKbs
+// needed for exact artifact-size derivation.
+type cascadeMetaProvider struct {
+	client lumera.Client // chain client whose Action() module is queried
+}
+
+// newCascadeMetaProvider wraps the given lumera client; a nil client is
+// tolerated here and rejected at query time.
+func newCascadeMetaProvider(c lumera.Client) *cascadeMetaProvider {
+	return &cascadeMetaProvider{client: c}
+}
+
+// GetCascadeMetadata fetches the on-chain action for ticketID, decodes its
+// CascadeMetadata, and returns it with the action's FileSizeKbs (as uint64)
+// for artifact-size derivation. Errors on a nil provider/client/module,
+// query failure, nil response or action, or undecodable metadata.
+func (m *cascadeMetaProvider) GetCascadeMetadata(ctx context.Context, ticketID string) (*actiontypes.CascadeMetadata, uint64, error) {
+	if m == nil || m.client == nil || m.client.Action() == nil {
+		return nil, 0, fmt.Errorf("cascadeMetaProvider: nil action module")
+	}
+	resp, err := m.client.Action().GetAction(ctx, ticketID)
+	if err != nil {
+		return nil, 0, fmt.Errorf("get action %q: %w", ticketID, err)
+	}
+	// Checked separately from err: wrapping a nil error with %w would render
+	// as "%!w(<nil>)" when the query returns (nil, nil).
+	if resp == nil {
+		return nil, 0, fmt.Errorf("get action %q: nil response", ticketID)
+	}
+	act := resp.GetAction()
+	if act == nil {
+		return nil, 0, fmt.Errorf("get action %q: nil action", ticketID)
+	}
+	meta, err := cascadekit.UnmarshalCascadeMetadata(act.Metadata)
+	if err != nil {
+		return nil, 0, fmt.Errorf("decode cascade metadata for %q: %w", ticketID, err)
+	}
+	// NOTE(review): FileSizeKbs is converted unchecked; a negative on-chain
+	// value would wrap to a huge uint64 — confirm the chain enforces >= 0.
+	return &meta, uint64(act.FileSizeKbs), nil
+}
diff --git a/supernode/cmd/start.go b/supernode/cmd/start.go
index 3bed95d3..2fe062b9 100644
--- a/supernode/cmd/start.go
+++ b/supernode/cmd/start.go
@@ -169,7 +169,7 @@ The supernode will connect to the Lumera network and begin participating in the
 	// race against the SN's own ~5s auto-submit ticker. Production deployments must
 	// leave this unset; gated behind an env var with no config-file surface so the
 	// canonical path is unchanged.
- var hostReporter service + var hostReporter *hostReporterService.Service if v := strings.TrimSpace(os.Getenv("LUMERA_SUPERNODE_DISABLE_HOST_REPORTER")); v == "1" || strings.EqualFold(v, "true") { logtrace.Info(ctx, "host_reporter disabled via LUMERA_SUPERNODE_DISABLE_HOST_REPORTER", logtrace.Fields{}) } else { @@ -206,7 +206,16 @@ The supernode will connect to the Lumera network and begin participating in the logtrace.Fatal(ctx, "Failed to open history DB", logtrace.Fields{"error": err.Error()}) } - storageChallengeServer := storageChallengeRPC.NewServer(appConfig.SupernodeConfig.Identity, p2pService, historyStore) + // LEP-6 result buffer: drained by host_reporter on each tick and + // appended to by the LEP6Dispatcher. + resultBuffer := storageChallengeService.NewBuffer() + if hostReporter != nil { + hostReporter.SetProofResultProvider(resultBuffer) + } + + storageChallengeServer := storageChallengeRPC.NewServer(appConfig.SupernodeConfig.Identity, p2pService, historyStore). + WithArtifactReader(newP2PArtifactReader(p2pService)). + WithRecipientSigner(kr, appConfig.SupernodeConfig.KeyName) var storageChallengeRunner *storageChallengeService.Service if appConfig.StorageChallengeConfig.Enabled { storageChallengeRunner, err = storageChallengeService.NewService( @@ -226,6 +235,24 @@ The supernode will connect to the Lumera network and begin participating in the if err != nil { logtrace.Fatal(ctx, "Failed to initialize storage challenge runner", logtrace.Fields{"error": err.Error()}) } + + // LEP-6 dispatcher (mode-gated internally; see DispatchEpoch). 
+ if appConfig.StorageChallengeConfig.LEP6.Enabled { + dispatcher, derr := storageChallengeService.NewLEP6Dispatcher( + lumeraClient, + kr, + appConfig.SupernodeConfig.KeyName, + appConfig.SupernodeConfig.Identity, + storageChallengeService.NewSecureSupernodeClientFactory(lumeraClient, kr, appConfig.SupernodeConfig.Identity, appConfig.SupernodeConfig.Port), + storageChallengeService.NewChainTicketProvider(lumeraClient), + newCascadeMetaProvider(lumeraClient), + resultBuffer, + ) + if derr != nil { + logtrace.Fatal(ctx, "Failed to initialize LEP-6 dispatcher", logtrace.Fields{"error": derr.Error()}) + } + storageChallengeRunner.SetLEP6Dispatcher(dispatcher) + } } // Create supernode server diff --git a/supernode/config/config.go b/supernode/config/config.go index 16dc3c6f..4a3722e7 100644 --- a/supernode/config/config.go +++ b/supernode/config/config.go @@ -6,6 +6,7 @@ import ( "os" "path/filepath" "strings" + "time" "github.com/LumeraProtocol/supernode/v2/pkg/logtrace" "gopkg.in/yaml.v3" @@ -66,9 +67,29 @@ type LogConfig struct { } type StorageChallengeConfig struct { - Enabled bool `yaml:"enabled"` - PollIntervalMs uint64 `yaml:"poll_interval_ms,omitempty"` - SubmitEvidence bool `yaml:"submit_evidence,omitempty"` + Enabled bool `yaml:"enabled"` + PollIntervalMs uint64 `yaml:"poll_interval_ms,omitempty"` + SubmitEvidence bool `yaml:"submit_evidence,omitempty"` + LEP6 StorageChallengeLEP6Config `yaml:"lep6,omitempty"` +} + +// StorageChallengeLEP6Config holds the supernode-binary-owned knobs for +// the LEP-6 compound storage challenge runtime. All chain-driven knobs +// (bucket thresholds, ranges-per-artifact, range size, enforcement mode) +// flow via x/audit Params and are deliberately omitted here. See +// docs/plans/LEP6_SUPERNODE_IMPLEMENTATION_PLAN_v2.md §2.3. +type StorageChallengeLEP6Config struct { + // Enabled gates construction of the LEP6Dispatcher. 
When false, the + // legacy single-range loop runs alone (default true; PR3 ships LEP-6 + // alongside the legacy loop with internal mode-gating). + Enabled bool `yaml:"enabled"` + // MaxConcurrentTargets bounds parallelism inside DispatchEpoch. + // Default 4. Reserved for follow-up parallelism work; PR3 dispatch + // is currently sequential per target. + MaxConcurrentTargets int `yaml:"max_concurrent_targets,omitempty"` + // RecipientReadTimeout caps a single GetCompoundProof RPC. Default + // 30s. + RecipientReadTimeout time.Duration `yaml:"recipient_read_timeout,omitempty"` } type Config struct { diff --git a/supernode/storage_challenge/lep6_client_factory.go b/supernode/storage_challenge/lep6_client_factory.go new file mode 100644 index 00000000..61821a7e --- /dev/null +++ b/supernode/storage_challenge/lep6_client_factory.go @@ -0,0 +1,114 @@ +package storage_challenge + +import ( + "context" + "fmt" + "net" + "strconv" + "strings" + "sync" + + "github.com/LumeraProtocol/lumera/x/lumeraid/securekeyx" + "github.com/LumeraProtocol/supernode/v2/gen/supernode" + "github.com/LumeraProtocol/supernode/v2/pkg/lumera" + "github.com/LumeraProtocol/supernode/v2/pkg/net/credentials" + grpcclient "github.com/LumeraProtocol/supernode/v2/pkg/net/grpc/client" + "github.com/cosmos/cosmos-sdk/crypto/keyring" + "google.golang.org/grpc" +) + +// secureSupernodeClientFactory dials peer supernodes using the same secure +// gRPC stack the legacy storage_challenge loop uses (see +// service.go::callGetSliceProof). It is the production implementation of +// SupernodeClientFactory wired by supernode/cmd/start.go. +type secureSupernodeClientFactory struct { + lumera lumera.Client + kr keyring.Keyring + self string + defaultPort uint16 + + mu sync.Mutex + grpcClient *grpcclient.Client + grpcOpts *grpcclient.ClientOptions +} + +// NewSecureSupernodeClientFactory builds a SupernodeClientFactory backed by +// the secure key-exchange gRPC stack. 
self is the local identity used in the +// ALTS handshake; defaultPort is the supernode port to fall back to when the +// chain-registered LatestAddress contains only a host. +func NewSecureSupernodeClientFactory(client lumera.Client, kr keyring.Keyring, self string, defaultPort uint16) SupernodeClientFactory { + return &secureSupernodeClientFactory{ + lumera: client, + kr: kr, + self: strings.TrimSpace(self), + defaultPort: defaultPort, + } +} + +func (f *secureSupernodeClientFactory) ensureClient() error { + f.mu.Lock() + defer f.mu.Unlock() + + if f.grpcClient != nil { + return nil + } + validator := lumera.NewSecureKeyExchangeValidator(f.lumera) + creds, err := credentials.NewClientCreds(&credentials.ClientOptions{ + CommonOptions: credentials.CommonOptions{ + Keyring: f.kr, + LocalIdentity: f.self, + PeerType: securekeyx.Supernode, + Validator: validator, + }, + }) + if err != nil { + return fmt.Errorf("create secure gRPC client creds: %w", err) + } + f.grpcClient = grpcclient.NewClient(creds) + f.grpcOpts = grpcclient.DefaultClientOptions() + f.grpcOpts.EnableRetries = true + return nil +} + +// Dial resolves the peer's chain-registered address and opens a secure +// gRPC connection. The returned SupernodeCompoundClient holds onto the +// underlying *grpc.ClientConn and closes it on Close(). 
+func (f *secureSupernodeClientFactory) Dial(ctx context.Context, target string) (SupernodeCompoundClient, error) { + if err := f.ensureClient(); err != nil { + return nil, err + } + info, err := f.lumera.SuperNode().GetSupernodeWithLatestAddress(ctx, target) + if err != nil || info == nil { + return nil, fmt.Errorf("resolve target %q: %w", target, err) + } + raw := strings.TrimSpace(info.LatestAddress) + if raw == "" { + return nil, fmt.Errorf("no address for target %q", target) + } + host, port, ok := parseHostAndPort(raw, int(f.defaultPort)) + if !ok || strings.TrimSpace(host) == "" { + return nil, fmt.Errorf("invalid address %q for target %q", raw, target) + } + addr := net.JoinHostPort(strings.TrimSpace(host), strconv.Itoa(port)) + conn, err := f.grpcClient.Connect(ctx, fmt.Sprintf("%s@%s", strings.TrimSpace(target), addr), f.grpcOpts) + if err != nil { + return nil, fmt.Errorf("dial target %q: %w", target, err) + } + return &secureCompoundClient{conn: conn, client: supernode.NewStorageChallengeServiceClient(conn)}, nil +} + +type secureCompoundClient struct { + conn *grpc.ClientConn + client supernode.StorageChallengeServiceClient +} + +func (c *secureCompoundClient) GetCompoundProof(ctx context.Context, req *supernode.GetCompoundProofRequest) (*supernode.GetCompoundProofResponse, error) { + return c.client.GetCompoundProof(ctx, req) +} + +func (c *secureCompoundClient) Close() error { + if c == nil || c.conn == nil { + return nil + } + return c.conn.Close() +} diff --git a/supernode/storage_challenge/lep6_dispatch.go b/supernode/storage_challenge/lep6_dispatch.go new file mode 100644 index 00000000..2613fa19 --- /dev/null +++ b/supernode/storage_challenge/lep6_dispatch.go @@ -0,0 +1,558 @@ +package storage_challenge + +import ( + "context" + "encoding/hex" + "errors" + "fmt" + "strings" + + "google.golang.org/grpc/codes" + "google.golang.org/grpc/status" + + actiontypes "github.com/LumeraProtocol/lumera/x/action/v1/types" + audittypes 
"github.com/LumeraProtocol/lumera/x/audit/v1/types" + "github.com/LumeraProtocol/supernode/v2/gen/supernode" + snkeyring "github.com/LumeraProtocol/supernode/v2/pkg/keyring" + "github.com/LumeraProtocol/supernode/v2/pkg/logtrace" + "github.com/LumeraProtocol/supernode/v2/pkg/lumera" + "github.com/LumeraProtocol/supernode/v2/pkg/storagechallenge" + "github.com/LumeraProtocol/supernode/v2/pkg/storagechallenge/deterministic" + "github.com/cosmos/cosmos-sdk/crypto/keyring" + "lukechampine.com/blake3" +) + +// LEP6 dispatcher — challenger-side per-epoch loop for the LEP-6 compound +// storage challenge. See docs/plans/LEP6_SUPERNODE_IMPLEMENTATION_PLAN_v2.md +// §2.3 (PR3) for full design rationale and §9-§11 of LEP6.md for the +// deterministic protocol surfaces. +// +// PR3 scope: +// - Reads EpochAnchor + assigned targets + audit Params (mode gate + +// bucket thresholds + multi-range params). +// - For each (target, bucket ∈ {RECENT, OLD}) deterministically selects +// ticket / artifact / ordinal / ranges. +// - Issues GetCompoundProof to the target via SupernodeClientFactory. +// - Locally recomputes the BLAKE3 proof hash, classifies PASS/FAIL, +// signs the transcript, and appends the StorageProofResult to the +// buffer for the host reporter to drain. +// +// PR3 does NOT cover: +// - Observer attestation collection (post-LEP-6 work). +// - RECHECK bucket dispatch (PR5 recheck service). +// - Probation/heal-op exclusion semantics. +// +// Ticket discovery is delegated to a TicketProvider interface. Production +// startup wires ChainTicketProvider, backed by x/action ListActionsBySuperNode; +// NoTicketProvider is retained only for tests and defensive fallback. + +// SupernodeCompoundClient is the minimal RPC surface the dispatcher needs +// to drive a target's recipient handler. The real implementation wraps the +// secure gRPC stub (gen/supernode.StorageChallengeServiceClient); tests +// inject a stub directly. 
+type SupernodeCompoundClient interface { + GetCompoundProof(ctx context.Context, req *supernode.GetCompoundProofRequest) (*supernode.GetCompoundProofResponse, error) + Close() error +} + +// SupernodeClientFactory dials a target supernode and returns a compound- +// proof client. Implementations should reuse the existing supernode-to- +// supernode secure gRPC dialer (see service.go::callGetSliceProof for the +// reference implementation). +type SupernodeClientFactory interface { + Dial(ctx context.Context, targetSupernodeAccount string) (SupernodeCompoundClient, error) +} + +// CascadeMetaProvider returns the cascade metadata for a ticket. The +// resolver in pkg/storagechallenge/lep6_resolution.go consumes the result +// to derive (artifact_count, artifact_key) without round-tripping to the +// chain on the hot path. +type CascadeMetaProvider interface { + GetCascadeMetadata(ctx context.Context, ticketID string) (*actiontypes.CascadeMetadata, uint64, error) +} + +// TicketProvider enumerates the cascade tickets that the given target +// supernode is a participant on. Returns the action_id and the action's +// register-time block height (for ClassifyTicketBucket). +type TicketProvider interface { + TicketsForTarget(ctx context.Context, targetSupernodeAccount string) ([]TicketDescriptor, error) +} + +// TicketDescriptor is a minimal projection of a cascade action that the +// dispatcher needs for bucket classification. +type TicketDescriptor struct { + TicketID string + AnchorBlock int64 +} + +// NoTicketProvider always reports zero tickets. It is used by tests and as a +// defensive fallback only; production startup wires ChainTicketProvider. +type NoTicketProvider struct{} + +// TicketsForTarget always returns nil, nil. +func (NoTicketProvider) TicketsForTarget(_ context.Context, _ string) ([]TicketDescriptor, error) { + return nil, nil +} + +// LEP6Dispatcher is the per-epoch challenger loop. 
Construct via +// NewLEP6Dispatcher and invoke DispatchEpoch from the storage_challenge +// Service tick. +type LEP6Dispatcher struct { + client lumera.Client + keyring keyring.Keyring + keyName string + self string + supernodeClient SupernodeClientFactory + tickets TicketProvider + meta CascadeMetaProvider + buffer *Buffer +} + +// NewLEP6Dispatcher constructs a dispatcher. supernodeClient, tickets, +// meta, and buffer are required; passing nil for any of them returns an +// error. +func NewLEP6Dispatcher( + client lumera.Client, + kr keyring.Keyring, + keyName, self string, + supernodeClient SupernodeClientFactory, + tickets TicketProvider, + meta CascadeMetaProvider, + buffer *Buffer, +) (*LEP6Dispatcher, error) { + if client == nil || client.Audit() == nil { + return nil, fmt.Errorf("lep6 dispatcher: lumera client missing audit module") + } + if kr == nil { + return nil, fmt.Errorf("lep6 dispatcher: keyring is nil") + } + if strings.TrimSpace(keyName) == "" { + return nil, fmt.Errorf("lep6 dispatcher: key name is empty") + } + if strings.TrimSpace(self) == "" { + return nil, fmt.Errorf("lep6 dispatcher: self identity is empty") + } + if supernodeClient == nil { + return nil, fmt.Errorf("lep6 dispatcher: supernode client factory is nil") + } + if tickets == nil { + tickets = NoTicketProvider{} + } + if meta == nil { + return nil, fmt.Errorf("lep6 dispatcher: cascade meta provider is nil") + } + if buffer == nil { + return nil, fmt.Errorf("lep6 dispatcher: result buffer is nil") + } + return &LEP6Dispatcher{ + client: client, + keyring: kr, + keyName: keyName, + self: self, + supernodeClient: supernodeClient, + tickets: tickets, + meta: meta, + buffer: buffer, + }, nil +} + +// DispatchEpoch runs the challenger flow for epochID. The flow gates on +// StorageTruthEnforcementMode: UNSPECIFIED skips dispatch entirely; +// SHADOW/SOFT/FULL all execute the same off-chain path (chain enforces +// mode-specific side-effects). 
+// +// Returns nil if the dispatch was skipped (no error), and any error that +// prevents the loop from running at all (e.g., chain queries fail). +// Per-target failures are surfaced as StorageProofResult{ResultClass=FAIL} +// rather than returning an error. +func (d *LEP6Dispatcher) DispatchEpoch(ctx context.Context, epochID uint64) error { + paramsResp, err := d.client.Audit().GetParams(ctx) + if err != nil || paramsResp == nil { + return fmt.Errorf("lep6 dispatch: get params: %w", err) + } + params := paramsResp.Params + mode := params.StorageTruthEnforcementMode + + if mode == audittypes.StorageTruthEnforcementMode_STORAGE_TRUTH_ENFORCEMENT_MODE_UNSPECIFIED { + logtrace.Debug(ctx, "lep6 dispatch: enforcement mode UNSPECIFIED; skipping", logtrace.Fields{ + "epoch_id": epochID, + }) + return nil + } + + anchorResp, err := d.client.Audit().GetEpochAnchor(ctx, epochID) + if err != nil || anchorResp == nil || anchorResp.Anchor.EpochId != epochID { + return fmt.Errorf("lep6 dispatch: epoch anchor not yet available for epoch %d", epochID) + } + anchor := anchorResp.Anchor + + assigned, err := d.client.Audit().GetAssignedTargets(ctx, d.self, epochID) + if err != nil || assigned == nil { + return fmt.Errorf("lep6 dispatch: get assigned targets: %w", err) + } + targets := assigned.TargetSupernodeAccounts + if len(targets) == 0 { + logtrace.Debug(ctx, "lep6 dispatch: no targets assigned this epoch", logtrace.Fields{ + "epoch_id": epochID, + "mode": mode.String(), + }) + return nil + } + + // Best-effort current height for bucket classification; if it fails + // we still run, falling through to UNSPECIFIED bucket = no eligible. 
+ currentHeight := int64(anchor.EpochEndHeight) + if currentHeight == 0 { + if blk, blkErr := d.client.Node().GetLatestBlock(ctx); blkErr == nil && blk != nil { + if sdk := blk.GetSdkBlock(); sdk != nil { + currentHeight = sdk.Header.Height + } else if b := blk.GetBlock(); b != nil { + currentHeight = b.Header.Height + } + } + } + + logtrace.Info(ctx, "lep6 dispatch: starting epoch", logtrace.Fields{ + "epoch_id": epochID, + "mode": mode.String(), + "targets": len(targets), + }) + + for _, target := range targets { + target = strings.TrimSpace(target) + if target == "" || target == d.self { + continue + } + if err := d.dispatchTarget(ctx, epochID, anchor, params, currentHeight, target); err != nil { + logtrace.Warn(ctx, "lep6 dispatch: target loop error", logtrace.Fields{ + "epoch_id": epochID, + "target": target, + "error": err.Error(), + }) + } + } + return nil +} + +func (d *LEP6Dispatcher) dispatchTarget( + ctx context.Context, + epochID uint64, + anchor audittypes.EpochAnchor, + params audittypes.Params, + currentHeight int64, + target string, +) error { + tickets, err := d.tickets.TicketsForTarget(ctx, target) + if err != nil { + // Treat as transient; emit no-eligible for both buckets so the + // chain still sees this epoch covered. 
+ logtrace.Warn(ctx, "lep6 dispatch: ticket provider error", logtrace.Fields{ + "epoch_id": epochID, "target": target, "error": err.Error(), + }) + tickets = nil + } + + for _, bucket := range []audittypes.StorageProofBucketType{ + audittypes.StorageProofBucketType_STORAGE_PROOF_BUCKET_TYPE_RECENT, + audittypes.StorageProofBucketType_STORAGE_PROOF_BUCKET_TYPE_OLD, + } { + eligibleIDs := make([]string, 0, len(tickets)) + for _, t := range tickets { + cls := deterministic.ClassifyTicketBucket(currentHeight, t.AnchorBlock, + params.StorageTruthRecentBucketMaxBlocks, params.StorageTruthOldBucketMinBlocks) + if cls == bucket { + eligibleIDs = append(eligibleIDs, t.TicketID) + } + } + + if len(eligibleIDs) == 0 { + d.appendNoEligible(ctx, epochID, anchor, target, bucket) + continue + } + + ticketID := deterministic.SelectTicketForBucket(eligibleIDs, nil, anchor.Seed, target, bucket) + if ticketID == "" { + d.appendNoEligible(ctx, epochID, anchor, target, bucket) + continue + } + + if err := d.dispatchTicket(ctx, epochID, anchor, params, target, bucket, ticketID); err != nil { + logtrace.Warn(ctx, "lep6 dispatch: ticket loop error", logtrace.Fields{ + "epoch_id": epochID, "target": target, "ticket": ticketID, "error": err.Error(), + }) + } + } + return nil +} + +func (d *LEP6Dispatcher) appendNoEligible( + ctx context.Context, + epochID uint64, + anchor audittypes.EpochAnchor, + target string, + bucket audittypes.StorageProofBucketType, +) { + transcriptHashHex, err := deterministic.TranscriptHash(deterministic.TranscriptInputs{ + EpochID: epochID, + ChallengerSupernodeAccount: d.self, + TargetSupernodeAccount: target, + TicketID: "", + Bucket: bucket, + ArtifactClass: audittypes.StorageProofArtifactClass_STORAGE_PROOF_ARTIFACT_CLASS_UNSPECIFIED, + }) + if err != nil { + logtrace.Warn(ctx, "lep6 dispatch: no-eligible transcript hash error", logtrace.Fields{ + "epoch_id": epochID, "target": target, "error": err.Error(), + }) + return + } + sig, _ := 
snkeyring.SignBytes(d.keyring, d.keyName, []byte(transcriptHashHex)) + + d.buffer.Append(epochID, &audittypes.StorageProofResult{ + TargetSupernodeAccount: target, + ChallengerSupernodeAccount: d.self, + BucketType: bucket, + ResultClass: audittypes.StorageProofResultClass_STORAGE_PROOF_RESULT_CLASS_NO_ELIGIBLE_TICKET, + TranscriptHash: transcriptHashHex, + ChallengerSignature: hex.EncodeToString(sig), + Details: "no eligible ticket for bucket", + }) + _ = anchor +} + +func (d *LEP6Dispatcher) dispatchTicket( + ctx context.Context, + epochID uint64, + anchor audittypes.EpochAnchor, + params audittypes.Params, + target string, + bucket audittypes.StorageProofBucketType, + ticketID string, +) error { + meta, fileSizeKbs, err := d.meta.GetCascadeMetadata(ctx, ticketID) + if err != nil || meta == nil { + return fmt.Errorf("get cascade meta: %w", err) + } + + indexCount, _ := storagechallenge.ResolveArtifactCount(meta, audittypes.StorageProofArtifactClass_STORAGE_PROOF_ARTIFACT_CLASS_INDEX) + symbolCount, _ := storagechallenge.ResolveArtifactCount(meta, audittypes.StorageProofArtifactClass_STORAGE_PROOF_ARTIFACT_CLASS_SYMBOL) + + class := deterministic.SelectArtifactClass(anchor.Seed, target, ticketID, indexCount, symbolCount) + if class == audittypes.StorageProofArtifactClass_STORAGE_PROOF_ARTIFACT_CLASS_UNSPECIFIED { + d.appendNoEligible(ctx, epochID, anchor, target, bucket) + return nil + } + + var artifactCount uint32 + switch class { + case audittypes.StorageProofArtifactClass_STORAGE_PROOF_ARTIFACT_CLASS_INDEX: + artifactCount = indexCount + case audittypes.StorageProofArtifactClass_STORAGE_PROOF_ARTIFACT_CLASS_SYMBOL: + artifactCount = symbolCount + } + ordinal, err := deterministic.SelectArtifactOrdinal(anchor.Seed, target, ticketID, class, artifactCount) + if err != nil { + return fmt.Errorf("select ordinal: %w", err) + } + artifactKey, err := storagechallenge.ResolveArtifactKey(meta, class, ordinal) + if err != nil { + return fmt.Errorf("resolve artifact key: 
%w", err) + } + artifactSize, err := storagechallenge.ResolveArtifactSize(&actiontypes.Action{FileSizeKbs: int64(fileSizeKbs)}, meta, class, ordinal) + if err != nil { + return fmt.Errorf("resolve artifact size: %w", err) + } + + rangeLen := uint64(params.StorageTruthCompoundRangeLenBytes) + if rangeLen == 0 { + rangeLen = uint64(deterministic.LEP6CompoundRangeLenBytes) + } + k := int(params.StorageTruthCompoundRangesPerArtifact) + if k == 0 { + k = deterministic.LEP6CompoundRangesPerArtifact + } + + offsets, err := deterministic.ComputeMultiRangeOffsets(anchor.Seed, target, ticketID, class, ordinal, artifactSize, rangeLen, k) + if err != nil { + return fmt.Errorf("compute offsets: %w", err) + } + ranges := make([]*supernode.ByteRange, len(offsets)) + for i, off := range offsets { + ranges[i] = &supernode.ByteRange{Start: off, End: off + rangeLen} + } + + derivHash, err := deterministic.DerivationInputHash(anchor.Seed, target, ticketID, class, ordinal, offsets, rangeLen) + if err != nil { + return fmt.Errorf("derivation input hash: %w", err) + } + + challengeID := deriveCompoundChallengeID(anchor.Seed, epochID, target, ticketID, class, ordinal) + + req := &supernode.GetCompoundProofRequest{ + ChallengeId: challengeID, + EpochId: epochID, + Seed: anchor.Seed, + TicketId: ticketID, + TargetSupernodeAccount: target, + ChallengerAccount: d.self, + ArtifactClass: uint32(class), + ArtifactOrdinal: ordinal, + ArtifactCount: artifactCount, + BucketType: uint32(bucket), + ArtifactKey: artifactKey, + ArtifactSize: artifactSize, + Ranges: ranges, + } + + conn, err := d.supernodeClient.Dial(ctx, target) + if err != nil { + d.appendFail(ctx, epochID, target, bucket, ticketID, class, ordinal, artifactCount, artifactKey, derivHash, classifyProofFailure(err, "dial"), fmt.Sprintf("dial: %v", err)) + return nil + } + defer func() { _ = conn.Close() }() + + resp, err := conn.GetCompoundProof(ctx, req) + if err != nil || resp == nil || !resp.Ok { + reason := "no response" + if err != 
nil { + reason = err.Error() + } else if resp != nil && resp.Error != "" { + reason = resp.Error + } + d.appendFail(ctx, epochID, target, bucket, ticketID, class, ordinal, artifactCount, artifactKey, derivHash, classifyProofFailure(err, reason), reason) + return nil + } + + // Local validation: range count + per-range size, and proof hash recompute. + if len(resp.RangeBytes) != k { + d.appendFail(ctx, epochID, target, bucket, ticketID, class, ordinal, artifactCount, artifactKey, derivHash, audittypes.StorageProofResultClass_STORAGE_PROOF_RESULT_CLASS_INVALID_TRANSCRIPT, fmt.Sprintf("range count mismatch: got %d want %d", len(resp.RangeBytes), k)) + return nil + } + hasher := blake3.New(32, nil) + for i, b := range resp.RangeBytes { + if uint64(len(b)) != rangeLen { + d.appendFail(ctx, epochID, target, bucket, ticketID, class, ordinal, artifactCount, artifactKey, derivHash, audittypes.StorageProofResultClass_STORAGE_PROOF_RESULT_CLASS_INVALID_TRANSCRIPT, fmt.Sprintf("range[%d] size %d != %d", i, len(b), rangeLen)) + return nil + } + _, _ = hasher.Write(b) + } + gotHash := hex.EncodeToString(hasher.Sum(nil)) + if !strings.EqualFold(gotHash, resp.ProofHashHex) { + d.appendFail(ctx, epochID, target, bucket, ticketID, class, ordinal, artifactCount, artifactKey, derivHash, audittypes.StorageProofResultClass_STORAGE_PROOF_RESULT_CLASS_HASH_MISMATCH, fmt.Sprintf("proof hash mismatch: local=%s remote=%s", gotHash, resp.ProofHashHex)) + return nil + } + + transcriptHashHex, err := deterministic.TranscriptHash(deterministic.TranscriptInputs{ + EpochID: epochID, + ChallengerSupernodeAccount: d.self, + TargetSupernodeAccount: target, + TicketID: ticketID, + Bucket: bucket, + ArtifactClass: class, + ArtifactOrdinal: ordinal, + ArtifactKey: artifactKey, + DerivationInputHash: derivHash, + CompoundProofHashHex: gotHash, + }) + if err != nil { + return fmt.Errorf("transcript hash: %w", err) + } + sig, signErr := snkeyring.SignBytes(d.keyring, d.keyName, []byte(transcriptHashHex)) + 
if signErr != nil { + return fmt.Errorf("sign transcript: %w", signErr) + } + + d.buffer.Append(epochID, &audittypes.StorageProofResult{ + TargetSupernodeAccount: target, + ChallengerSupernodeAccount: d.self, + TicketId: ticketID, + BucketType: bucket, + ArtifactClass: class, + ArtifactOrdinal: ordinal, + ArtifactKey: artifactKey, + ArtifactCount: artifactCount, + ResultClass: audittypes.StorageProofResultClass_STORAGE_PROOF_RESULT_CLASS_PASS, + TranscriptHash: transcriptHashHex, + DerivationInputHash: derivHash, + ChallengerSignature: hex.EncodeToString(sig), + }) + return nil +} + +func (d *LEP6Dispatcher) appendFail( + ctx context.Context, + epochID uint64, + target string, + bucket audittypes.StorageProofBucketType, + ticketID string, + class audittypes.StorageProofArtifactClass, + ordinal uint32, + artifactCount uint32, + artifactKey string, + derivHash string, + resultClass audittypes.StorageProofResultClass, + reason string, +) { + transcriptHashHex, err := deterministic.TranscriptHash(deterministic.TranscriptInputs{ + EpochID: epochID, + ChallengerSupernodeAccount: d.self, + TargetSupernodeAccount: target, + TicketID: ticketID, + Bucket: bucket, + ArtifactClass: class, + ArtifactOrdinal: ordinal, + ArtifactKey: artifactKey, + DerivationInputHash: derivHash, + // CompoundProofHashHex empty on failure — captures the non-pass shape. 
+ }) + if err != nil { + logtrace.Warn(ctx, "lep6 dispatch: fail transcript hash error", logtrace.Fields{ + "epoch_id": epochID, "target": target, "ticket": ticketID, "error": err.Error(), + }) + return + } + sig, _ := snkeyring.SignBytes(d.keyring, d.keyName, []byte(transcriptHashHex)) + + d.buffer.Append(epochID, &audittypes.StorageProofResult{ + TargetSupernodeAccount: target, + ChallengerSupernodeAccount: d.self, + TicketId: ticketID, + BucketType: bucket, + ArtifactClass: class, + ArtifactOrdinal: ordinal, + ArtifactKey: artifactKey, + ArtifactCount: artifactCount, + ResultClass: resultClass, + TranscriptHash: transcriptHashHex, + DerivationInputHash: derivHash, + ChallengerSignature: hex.EncodeToString(sig), + Details: reason, + }) +} + +func deriveCompoundChallengeID(seed []byte, epochID uint64, target, ticketID string, class audittypes.StorageProofArtifactClass, ordinal uint32) string { + h := blake3.New(32, nil) + _, _ = h.Write(seed) + _, _ = h.Write([]byte(fmt.Sprintf("lep6:%d:%s:%s:%d:%d", epochID, target, ticketID, int32(class), ordinal))) + return hex.EncodeToString(h.Sum(nil)) +} + +func classifyProofFailure(err error, reason string) audittypes.StorageProofResultClass { + if err == nil { + lower := strings.ToLower(strings.TrimSpace(reason)) + if lower == "" || strings.Contains(lower, "timeout") || strings.Contains(lower, "no response") { + return audittypes.StorageProofResultClass_STORAGE_PROOF_RESULT_CLASS_TIMEOUT_OR_NO_RESPONSE + } + return audittypes.StorageProofResultClass_STORAGE_PROOF_RESULT_CLASS_INVALID_TRANSCRIPT + } + if errors.Is(err, context.DeadlineExceeded) || errors.Is(err, context.Canceled) { + return audittypes.StorageProofResultClass_STORAGE_PROOF_RESULT_CLASS_TIMEOUT_OR_NO_RESPONSE + } + if st, ok := status.FromError(err); ok { + switch st.Code() { + case codes.DeadlineExceeded, codes.Canceled, codes.Unavailable: + return audittypes.StorageProofResultClass_STORAGE_PROOF_RESULT_CLASS_TIMEOUT_OR_NO_RESPONSE + } + } + return 
audittypes.StorageProofResultClass_STORAGE_PROOF_RESULT_CLASS_INVALID_TRANSCRIPT +} diff --git a/supernode/storage_challenge/lep6_dispatch_test.go b/supernode/storage_challenge/lep6_dispatch_test.go new file mode 100644 index 00000000..20285335 --- /dev/null +++ b/supernode/storage_challenge/lep6_dispatch_test.go @@ -0,0 +1,429 @@ +package storage_challenge + +import ( + "context" + "crypto/sha256" + "encoding/hex" + "errors" + "testing" + + actiontypes "github.com/LumeraProtocol/lumera/x/action/v1/types" + audittypes "github.com/LumeraProtocol/lumera/x/audit/v1/types" + supernodepb "github.com/LumeraProtocol/supernode/v2/gen/supernode" + lumeraMock "github.com/LumeraProtocol/supernode/v2/pkg/lumera" + auditmod "github.com/LumeraProtocol/supernode/v2/pkg/lumera/modules/audit" + nodemod "github.com/LumeraProtocol/supernode/v2/pkg/lumera/modules/node" + "github.com/LumeraProtocol/supernode/v2/pkg/storagechallenge/deterministic" + "github.com/cosmos/cosmos-sdk/codec" + codectypes "github.com/cosmos/cosmos-sdk/codec/types" + cryptocodec "github.com/cosmos/cosmos-sdk/crypto/codec" + "github.com/cosmos/cosmos-sdk/crypto/hd" + "github.com/cosmos/cosmos-sdk/crypto/keyring" + "github.com/cosmos/cosmos-sdk/types/query" + "github.com/cosmos/go-bip39" + "github.com/stretchr/testify/require" + "go.uber.org/mock/gomock" + "google.golang.org/grpc/codes" + "google.golang.org/grpc/status" + "lukechampine.com/blake3" +) + +// dispatchAuditModule is an in-test stub of audit.Module used to drive +// LEP6Dispatcher per-test; mirrors the host_reporter test pattern. 
+type dispatchAuditModule struct { + params *audittypes.QueryParamsResponse + anchor *audittypes.QueryEpochAnchorResponse + assigned *audittypes.QueryAssignedTargetsResponse +} + +var _ auditmod.Module = (*dispatchAuditModule)(nil) + +func (s *dispatchAuditModule) GetParams(ctx context.Context) (*audittypes.QueryParamsResponse, error) { + return s.params, nil +} +func (s *dispatchAuditModule) GetEpochAnchor(ctx context.Context, epochID uint64) (*audittypes.QueryEpochAnchorResponse, error) { + return s.anchor, nil +} +func (s *dispatchAuditModule) GetCurrentEpoch(ctx context.Context) (*audittypes.QueryCurrentEpochResponse, error) { + return &audittypes.QueryCurrentEpochResponse{}, nil +} +func (s *dispatchAuditModule) GetCurrentEpochAnchor(ctx context.Context) (*audittypes.QueryCurrentEpochAnchorResponse, error) { + return &audittypes.QueryCurrentEpochAnchorResponse{}, nil +} +func (s *dispatchAuditModule) GetAssignedTargets(ctx context.Context, supernodeAccount string, epochID uint64) (*audittypes.QueryAssignedTargetsResponse, error) { + return s.assigned, nil +} +func (s *dispatchAuditModule) GetEpochReport(ctx context.Context, epochID uint64, supernodeAccount string) (*audittypes.QueryEpochReportResponse, error) { + return &audittypes.QueryEpochReportResponse{}, nil +} +func (s *dispatchAuditModule) GetNodeSuspicionState(ctx context.Context, supernodeAccount string) (*audittypes.QueryNodeSuspicionStateResponse, error) { + return &audittypes.QueryNodeSuspicionStateResponse{}, nil +} +func (s *dispatchAuditModule) GetReporterReliabilityState(ctx context.Context, reporterAccount string) (*audittypes.QueryReporterReliabilityStateResponse, error) { + return &audittypes.QueryReporterReliabilityStateResponse{}, nil +} +func (s *dispatchAuditModule) GetTicketDeteriorationState(ctx context.Context, ticketID string) (*audittypes.QueryTicketDeteriorationStateResponse, error) { + return &audittypes.QueryTicketDeteriorationStateResponse{}, nil +} +func (s 
*dispatchAuditModule) GetHealOp(ctx context.Context, healOpID uint64) (*audittypes.QueryHealOpResponse, error) { + return &audittypes.QueryHealOpResponse{}, nil +} +func (s *dispatchAuditModule) GetHealOpsByStatus(ctx context.Context, status audittypes.HealOpStatus, pagination *query.PageRequest) (*audittypes.QueryHealOpsByStatusResponse, error) { + return &audittypes.QueryHealOpsByStatusResponse{}, nil +} +func (s *dispatchAuditModule) GetHealOpsByTicket(ctx context.Context, ticketID string, pagination *query.PageRequest) (*audittypes.QueryHealOpsByTicketResponse, error) { + return &audittypes.QueryHealOpsByTicketResponse{}, nil +} + +// stubTicketProvider returns a fixed list per target. +type stubTicketProvider struct { + tickets map[string][]TicketDescriptor + err error +} + +func (s stubTicketProvider) TicketsForTarget(_ context.Context, target string) ([]TicketDescriptor, error) { + if s.err != nil { + return nil, s.err + } + return s.tickets[target], nil +} + +// stubMetaProvider returns a fixed cascade meta + size for any ticket. +type stubMetaProvider struct { + meta *actiontypes.CascadeMetadata + size uint64 + err error +} + +func (s stubMetaProvider) GetCascadeMetadata(_ context.Context, _ string) (*actiontypes.CascadeMetadata, uint64, error) { + if s.err != nil { + return nil, 0, s.err + } + return s.meta, s.size, nil +} + +// stubCompoundClient implements SupernodeCompoundClient. +type stubCompoundClient struct { + resp *supernodepb.GetCompoundProofResponse + err error +} + +func (s *stubCompoundClient) GetCompoundProof(_ context.Context, _ *supernodepb.GetCompoundProofRequest) (*supernodepb.GetCompoundProofResponse, error) { + return s.resp, s.err +} +func (s *stubCompoundClient) Close() error { return nil } + +// stubFactory always returns the same stubCompoundClient. 
+type stubFactory struct { + client *stubCompoundClient + err error +} + +func (s *stubFactory) Dial(_ context.Context, _ string) (SupernodeCompoundClient, error) { + if s.err != nil { + return nil, s.err + } + return s.client, nil +} + +func newDispatchKeyringAndIdentity(t *testing.T) (keyring.Keyring, string, string) { + t.Helper() + ir := codectypes.NewInterfaceRegistry() + cryptocodec.RegisterInterfaces(ir) + cdc := codec.NewProtoCodec(ir) + kr := keyring.NewInMemory(cdc) + entropy, err := bip39.NewEntropy(128) + require.NoError(t, err) + mnemonic, err := bip39.NewMnemonic(entropy) + require.NoError(t, err) + algos, _ := kr.SupportedAlgorithms() + algo, err := keyring.NewSigningAlgoFromString("secp256k1", algos) + require.NoError(t, err) + hdPath := hd.CreateHDPath(118, 0, 0).String() + rec, err := kr.NewAccount("dispatcher-test", mnemonic, "", hdPath, algo) + require.NoError(t, err) + addr, err := rec.GetAddress() + require.NoError(t, err) + return kr, "dispatcher-test", addr.String() +} + +// makeAnchor returns a deterministic EpochAnchor with a 32-byte seed +// derived from the epoch id so tests are reproducible across runs. +func makeAnchor(epochID uint64, endHeight int64, targets ...string) audittypes.EpochAnchor { + seed := sha256.Sum256([]byte("test-seed")) + return audittypes.EpochAnchor{ + EpochId: epochID, + EpochEndHeight: endHeight, + EpochLengthBlocks: 100, + Seed: seed[:], + ActiveSupernodeAccounts: append([]string{}, targets...), + TargetSupernodeAccounts: append([]string{}, targets...), + } +} + +// defaultParams returns audit Params with bucket thresholds matching the +// chain's defaults (3*EpochLengthBlocks RECENT, 30*EpochLengthBlocks OLD) +// and the requested enforcement mode. 
+func defaultParams(mode audittypes.StorageTruthEnforcementMode) audittypes.Params { + return audittypes.Params{ + StorageTruthEnforcementMode: mode, + StorageTruthRecentBucketMaxBlocks: 300, + StorageTruthOldBucketMinBlocks: 3000, + StorageTruthCompoundRangesPerArtifact: uint32(deterministic.LEP6CompoundRangesPerArtifact), + StorageTruthCompoundRangeLenBytes: uint32(deterministic.LEP6CompoundRangeLenBytes), + } +} + +// newDispatcher wires a dispatcher with the given audit module + factory + +// providers. Returns the dispatcher and the buffer (for assertions). +func newDispatcher( + t *testing.T, + audit *dispatchAuditModule, + factory SupernodeClientFactory, + tickets TicketProvider, + meta CascadeMetaProvider, +) (*LEP6Dispatcher, *Buffer) { + t.Helper() + ctrl := gomock.NewController(t) + t.Cleanup(ctrl.Finish) + mockLumera := lumeraMock.NewMockClient(ctrl) + mockLumera.EXPECT().Audit().Return(audit).AnyTimes() + // Node() returns a typed-nil; only used when EpochAnchor.EpochEndHeight==0, + // which our tests always set non-zero, so this is unreachable in practice. 
+ var nilNode nodemod.Module + mockLumera.EXPECT().Node().Return(nilNode).AnyTimes() + + kr, keyName, identity := newDispatchKeyringAndIdentity(t) + buf := NewBuffer() + d, err := NewLEP6Dispatcher(mockLumera, kr, keyName, identity, factory, tickets, meta, buf) + require.NoError(t, err) + return d, buf +} + +func TestDispatchEpoch_ModeUnspecified_NoOp(t *testing.T) { + audit := &dispatchAuditModule{ + params: &audittypes.QueryParamsResponse{ + Params: defaultParams(audittypes.StorageTruthEnforcementMode_STORAGE_TRUTH_ENFORCEMENT_MODE_UNSPECIFIED), + }, + } + d, buf := newDispatcher(t, audit, &stubFactory{}, NoTicketProvider{}, stubMetaProvider{}) + + require.NoError(t, d.DispatchEpoch(context.Background(), 7)) + require.Empty(t, buf.CollectResults(7), "buffer must be empty under UNSPECIFIED mode") +} + +func TestDispatchEpoch_ModeShadow_AppendsResults(t *testing.T) { + const epochID uint64 = 11 + anchor := makeAnchor(epochID, 500, "sn-target") + audit := &dispatchAuditModule{ + params: &audittypes.QueryParamsResponse{Params: defaultParams(audittypes.StorageTruthEnforcementMode_STORAGE_TRUTH_ENFORCEMENT_MODE_SHADOW)}, + anchor: &audittypes.QueryEpochAnchorResponse{Anchor: anchor}, + assigned: &audittypes.QueryAssignedTargetsResponse{TargetSupernodeAccounts: []string{"sn-target"}}, + } + // NoTicketProvider → both buckets emit NO_ELIGIBLE_TICKET. 
+ d, buf := newDispatcher(t, audit, &stubFactory{}, NoTicketProvider{}, stubMetaProvider{}) + + require.NoError(t, d.DispatchEpoch(context.Background(), epochID)) + results := buf.CollectResults(epochID) + require.Len(t, results, 2, "expected one NO_ELIGIBLE_TICKET per bucket") + for _, r := range results { + require.Equal(t, audittypes.StorageProofResultClass_STORAGE_PROOF_RESULT_CLASS_NO_ELIGIBLE_TICKET, r.ResultClass) + require.NotEmpty(t, r.TranscriptHash) + require.NotEmpty(t, r.ChallengerSignature) + } +} + +func TestDispatchEpoch_NoEligibleTicket_EmitsClass(t *testing.T) { + const epochID uint64 = 13 + // Anchor end-height=10000; tickets anchored at heights that fall in NEITHER + // bucket. Gap is delta ∈ (recent_max=300, old_min=3000), i.e. 301..2999. + // Pick anchor=8000 → currentHeight-anchor=2000 → UNSPECIFIED bucket. + anchor := makeAnchor(epochID, 10000, "sn-target") + audit := &dispatchAuditModule{ + params: &audittypes.QueryParamsResponse{Params: defaultParams(audittypes.StorageTruthEnforcementMode_STORAGE_TRUTH_ENFORCEMENT_MODE_SHADOW)}, + anchor: &audittypes.QueryEpochAnchorResponse{Anchor: anchor}, + assigned: &audittypes.QueryAssignedTargetsResponse{TargetSupernodeAccounts: []string{"sn-target"}}, + } + tickets := stubTicketProvider{tickets: map[string][]TicketDescriptor{ + "sn-target": {{TicketID: "tkt-gap", AnchorBlock: 8000}}, + }} + d, buf := newDispatcher(t, audit, &stubFactory{}, tickets, stubMetaProvider{}) + + require.NoError(t, d.DispatchEpoch(context.Background(), epochID)) + results := buf.CollectResults(epochID) + require.Len(t, results, 2) + for _, r := range results { + require.Equal(t, audittypes.StorageProofResultClass_STORAGE_PROOF_RESULT_CLASS_NO_ELIGIBLE_TICKET, r.ResultClass) + } +} + +// TestDispatchEpoch_GetCompoundProofError_EmitsFailClass exercises the dial / +// RPC failure path: when the ticket is eligible and the RPC returns an error, +// the dispatcher emits a FAIL-class result (not bubble the error up) so the +// 
chain still sees coverage. +func TestDispatchEpoch_GetCompoundProofError_EmitsFailClass(t *testing.T) { + const epochID uint64 = 17 + // EpochEndHeight=200, ticket anchor=100 → currentHeight-anchor=100 < 300 → + // RECENT bucket eligible. + anchor := makeAnchor(epochID, 200, "sn-target") + audit := &dispatchAuditModule{ + params: &audittypes.QueryParamsResponse{Params: defaultParams(audittypes.StorageTruthEnforcementMode_STORAGE_TRUTH_ENFORCEMENT_MODE_SHADOW)}, + anchor: &audittypes.QueryEpochAnchorResponse{Anchor: anchor}, + assigned: &audittypes.QueryAssignedTargetsResponse{TargetSupernodeAccounts: []string{"sn-target"}}, + } + tickets := stubTicketProvider{tickets: map[string][]TicketDescriptor{ + "sn-target": {{TicketID: "tkt-rpc-fail", AnchorBlock: 100}}, + }} + // Cascade meta: SYMBOL-only with one id; artifact_size big enough for 4*256. + meta := stubMetaProvider{ + meta: &actiontypes.CascadeMetadata{RqIdsIc: 0, RqIdsMax: 1, RqIdsIds: []string{"sym-0"}}, + size: 4 * 1024, + } + // Factory returns a client whose GetCompoundProof errors. + factory := &stubFactory{client: &stubCompoundClient{err: errors.New("rpc unavailable")}} + d, buf := newDispatcher(t, audit, factory, tickets, meta) + + require.NoError(t, d.DispatchEpoch(context.Background(), epochID)) + results := buf.CollectResults(epochID) + require.NotEmpty(t, results) + // Expect a FAIL class for the RECENT bucket (single eligible ticket) and + // NO_ELIGIBLE for OLD (empty there). 
+ var sawFail, sawNoEligible bool + for _, r := range results { + switch r.ResultClass { + case audittypes.StorageProofResultClass_STORAGE_PROOF_RESULT_CLASS_INVALID_TRANSCRIPT: + sawFail = true + require.Contains(t, r.Details, "rpc unavailable") + case audittypes.StorageProofResultClass_STORAGE_PROOF_RESULT_CLASS_NO_ELIGIBLE_TICKET: + sawNoEligible = true + } + } + require.True(t, sawFail, "expected at least one FAIL class result on RPC error") + require.True(t, sawNoEligible, "expected NO_ELIGIBLE for the OLD bucket") +} + +func TestClassifyProofFailure_NonTimeoutRPCErrorsAreInvalidTranscript(t *testing.T) { + require.Equal(t, + audittypes.StorageProofResultClass_STORAGE_PROOF_RESULT_CLASS_INVALID_TRANSCRIPT, + classifyProofFailure(status.Error(codes.PermissionDenied, "not allowed"), "not allowed"), + ) + require.Equal(t, + audittypes.StorageProofResultClass_STORAGE_PROOF_RESULT_CLASS_INVALID_TRANSCRIPT, + classifyProofFailure(errors.New("connection refused"), "connection refused"), + ) + require.Equal(t, + audittypes.StorageProofResultClass_STORAGE_PROOF_RESULT_CLASS_INVALID_TRANSCRIPT, + classifyProofFailure(nil, "recipient validation failed"), + ) +} + +func TestClassifyProofFailure_TimeoutsRemainTimeoutOrNoResponse(t *testing.T) { + require.Equal(t, + audittypes.StorageProofResultClass_STORAGE_PROOF_RESULT_CLASS_TIMEOUT_OR_NO_RESPONSE, + classifyProofFailure(context.DeadlineExceeded, "deadline exceeded"), + ) + require.Equal(t, + audittypes.StorageProofResultClass_STORAGE_PROOF_RESULT_CLASS_TIMEOUT_OR_NO_RESPONSE, + classifyProofFailure(status.Error(codes.Unavailable, "unavailable"), "unavailable"), + ) + require.Equal(t, + audittypes.StorageProofResultClass_STORAGE_PROOF_RESULT_CLASS_TIMEOUT_OR_NO_RESPONSE, + classifyProofFailure(nil, "no response"), + ) +} + +func TestDispatchEpoch_GetCompoundProofTimeout_EmitsTimeoutClass(t *testing.T) { + const epochID uint64 = 18 + anchor := makeAnchor(epochID, 200, "sn-target") + audit := &dispatchAuditModule{ + params: 
&audittypes.QueryParamsResponse{Params: defaultParams(audittypes.StorageTruthEnforcementMode_STORAGE_TRUTH_ENFORCEMENT_MODE_SHADOW)}, + anchor: &audittypes.QueryEpochAnchorResponse{Anchor: anchor}, + assigned: &audittypes.QueryAssignedTargetsResponse{TargetSupernodeAccounts: []string{"sn-target"}}, + } + tickets := stubTicketProvider{tickets: map[string][]TicketDescriptor{ + "sn-target": {{TicketID: "tkt-timeout", AnchorBlock: 100}}, + }} + meta := stubMetaProvider{ + meta: &actiontypes.CascadeMetadata{RqIdsIc: 0, RqIdsMax: 1, RqIdsIds: []string{"sym-0"}}, + size: 4 * 1024, + } + factory := &stubFactory{client: &stubCompoundClient{err: context.DeadlineExceeded}} + d, buf := newDispatcher(t, audit, factory, tickets, meta) + + require.NoError(t, d.DispatchEpoch(context.Background(), epochID)) + results := buf.CollectResults(epochID) + require.NotEmpty(t, results) + var sawTimeout bool + for _, r := range results { + if r.TicketId == "tkt-timeout" { + sawTimeout = true + require.Equal(t, audittypes.StorageProofResultClass_STORAGE_PROOF_RESULT_CLASS_TIMEOUT_OR_NO_RESPONSE, r.ResultClass) + } + } + require.True(t, sawTimeout, "expected timeout-class result for deadline exceeded RPC") +} + +// TestDispatchEpoch_HappyPath_EmitsPassResult exercises the full PASS path: +// eligible ticket, valid cascade meta, GetCompoundProof returns 4 ranges of +// 256B each whose BLAKE3 hash matches resp.ProofHashHex. Dispatcher must +// emit PASS-class result with non-empty transcript + signature + derivation +// hash. +// +// Only RECENT is exercised here; OLD bucket has no eligible ticket and emits +// NO_ELIGIBLE, which is also asserted. 
+func TestDispatchEpoch_HappyPath_EmitsPassResult(t *testing.T) { + const epochID uint64 = 19 + anchor := makeAnchor(epochID, 200, "sn-target") + audit := &dispatchAuditModule{ + params: &audittypes.QueryParamsResponse{Params: defaultParams(audittypes.StorageTruthEnforcementMode_STORAGE_TRUTH_ENFORCEMENT_MODE_FULL)}, + anchor: &audittypes.QueryEpochAnchorResponse{Anchor: anchor}, + assigned: &audittypes.QueryAssignedTargetsResponse{TargetSupernodeAccounts: []string{"sn-target"}}, + } + tickets := stubTicketProvider{tickets: map[string][]TicketDescriptor{ + "sn-target": {{TicketID: "tkt-happy", AnchorBlock: 100}}, + }} + meta := stubMetaProvider{ + meta: &actiontypes.CascadeMetadata{RqIdsIc: 0, RqIdsMax: 1, RqIdsIds: []string{"sym-0"}}, + size: 4 * 1024, + } + + // Construct a response with 4 ranges of 256 bytes each (deterministic + // content) and a matching BLAKE3 proof hash. + rangeBytes := make([][]byte, deterministic.LEP6CompoundRangesPerArtifact) + hasher := blake3.New(32, nil) + for i := range rangeBytes { + buf := make([]byte, deterministic.LEP6CompoundRangeLenBytes) + // Fill with i-stamped bytes for determinism. 
+ for j := range buf { + buf[j] = byte((i*7 + j) & 0xFF) + } + rangeBytes[i] = buf + _, _ = hasher.Write(buf) + } + proofHashHex := hex.EncodeToString(hasher.Sum(nil)) + resp := &supernodepb.GetCompoundProofResponse{ + Ok: true, + RangeBytes: rangeBytes, + ProofHashHex: proofHashHex, + } + factory := &stubFactory{client: &stubCompoundClient{resp: resp}} + d, buf := newDispatcher(t, audit, factory, tickets, meta) + + require.NoError(t, d.DispatchEpoch(context.Background(), epochID)) + results := buf.CollectResults(epochID) + require.NotEmpty(t, results) + + var sawPass bool + for _, r := range results { + if r.ResultClass == audittypes.StorageProofResultClass_STORAGE_PROOF_RESULT_CLASS_PASS { + sawPass = true + require.Equal(t, "tkt-happy", r.TicketId) + require.Equal(t, audittypes.StorageProofBucketType_STORAGE_PROOF_BUCKET_TYPE_RECENT, r.BucketType) + require.NotEmpty(t, r.TranscriptHash) + require.NotEmpty(t, r.DerivationInputHash) + require.NotEmpty(t, r.ChallengerSignature) + require.NotEmpty(t, r.ArtifactKey) + } + } + require.True(t, sawPass, "expected a PASS-class result on happy path") +} diff --git a/supernode/storage_challenge/result_buffer.go b/supernode/storage_challenge/result_buffer.go new file mode 100644 index 00000000..d1a920f5 --- /dev/null +++ b/supernode/storage_challenge/result_buffer.go @@ -0,0 +1,151 @@ +package storage_challenge + +import ( + "context" + "sort" + "sync" + + audittypes "github.com/LumeraProtocol/lumera/x/audit/v1/types" + "github.com/LumeraProtocol/supernode/v2/pkg/logtrace" + "github.com/LumeraProtocol/supernode/v2/pkg/storagechallenge" +) + +// Buffer accumulates StorageProofResults emitted by the per-epoch challenger +// loop and surfaces them to the host reporter (which submits MsgSubmitEpochReport). 
+//
+// Buffer satisfies host_reporter.ProofResultProvider:
+//
+//	CollectResults(epochID uint64) []*audittypes.StorageProofResult
+//
+// The chain audit keeper rejects an entire epoch report if its
+// storage_proof_results slice exceeds MaxStorageProofResultsPerReport
+// (lumera/x/audit/v1/types/keys.go:11-13, enforced in
+// x/audit/v1/keeper/msg_submit_epoch_report.go:126-130). Because two
+// independent challengers may produce overlapping result sets that combine
+// past the cap, CollectResults applies a deterministic self-throttle: drop
+// non-RECENT bucket entries first (oldest by ticket_id lex), then drop oldest
+// RECENT entries by the same order, until the slice fits.
+//
+// Note: audittypes.StorageProofResult has no EpochId field; the challenger
+// supplies the binding epoch at Append time so the buffer can drain only the
+// relevant epoch and leave entries for other epochs intact.
+//
+// Buffer is safe for concurrent use.
+type Buffer struct {
+	mu      sync.Mutex
+	byEpoch map[uint64][]*audittypes.StorageProofResult
+}
+
+// NewBuffer returns an empty Buffer.
+func NewBuffer() *Buffer {
+	return &Buffer{byEpoch: make(map[uint64][]*audittypes.StorageProofResult)}
+}
+
+// Append stores result under epochID. Nil results are ignored.
+func (b *Buffer) Append(epochID uint64, result *audittypes.StorageProofResult) {
+	if result == nil {
+		return
+	}
+	b.mu.Lock()
+	defer b.mu.Unlock()
+	b.byEpoch[epochID] = append(b.byEpoch[epochID], result)
+}
+
+// CollectResults drains and returns the buffered results for epochID, applying
+// the LEP-6 16-cap self-throttle. Results buffered for other epochs are left
+// intact. The returned slice is sorted deterministically by
+// (BucketType, TicketId) so that downstream signing/serialisation is
+// stable across challengers and re-runs.
+func (b *Buffer) CollectResults(epochID uint64) []*audittypes.StorageProofResult {
+	b.mu.Lock()
+	matching := b.byEpoch[epochID]
+	delete(b.byEpoch, epochID)
+	b.mu.Unlock()
+
+	if len(matching) == 0 {
+		return nil
+	}
+
+	// Make a defensive copy so we don't alias caller data when we sort.
+	out := make([]*audittypes.StorageProofResult, len(matching))
+	copy(out, matching)
+
+	const maxKeep = storagechallenge.MaxStorageProofResultsPerReport
+
+	if len(out) > maxKeep {
+		out = throttleResults(epochID, out, maxKeep)
+	}
+
+	sortDeterministic(out)
+	return out
+}
+
+// throttleResults enforces len(results) <= maxKeep by:
+// 1. Dropping oldest non-RECENT entries by ticket_id lex.
+// 2. If still over cap (only RECENT remain), dropping oldest RECENT by same lex.
+//
+// All results in this call are bound to the same epochID, so the
+// (epoch_id asc, ticket_id asc) lex specified in the LEP-6 plan collapses to
+// ticket_id asc here. Kept for forward compatibility if the buffer ever
+// throttles across epochs.
+//
+// A Warn log is emitted when throttling activates.
+func throttleResults(epochID uint64, results []*audittypes.StorageProofResult, maxKeep int) []*audittypes.StorageProofResult {
+	originalCount := len(results)
+
+	recent := make([]*audittypes.StorageProofResult, 0, len(results))
+	nonRecent := make([]*audittypes.StorageProofResult, 0, len(results))
+	for _, r := range results {
+		if r == nil {
+			continue
+		}
+		if r.BucketType == audittypes.StorageProofBucketType_STORAGE_PROOF_BUCKET_TYPE_RECENT {
+			recent = append(recent, r)
+		} else {
+			nonRecent = append(nonRecent, r)
+		}
+	}
+
+	// Sort each partition oldest-first (ticket_id asc) so dropping from index 0
+	// drops oldest.
+ sort.SliceStable(nonRecent, func(i, j int) bool { return nonRecent[i].TicketId < nonRecent[j].TicketId }) + sort.SliceStable(recent, func(i, j int) bool { return recent[i].TicketId < recent[j].TicketId }) + + total := len(recent) + len(nonRecent) + for total > maxKeep && len(nonRecent) > 0 { + nonRecent = nonRecent[1:] + total-- + } + for total > maxKeep && len(recent) > 0 { + recent = recent[1:] + total-- + } + + kept := make([]*audittypes.StorageProofResult, 0, total) + kept = append(kept, recent...) + kept = append(kept, nonRecent...) + + logtrace.Warn(context.Background(), "storage_challenge: result buffer throttled to chain cap", logtrace.Fields{ + "epoch_id": epochID, + "original": originalCount, + "kept": len(kept), + "dropped": originalCount - len(kept), + "cap": maxKeep, + "policy": "drop-non-RECENT-first", + }) + + return kept +} + +// sortDeterministic orders results by (BucketType, TicketId). All results in +// a single CollectResults call share the same epoch, so EpochId would not +// further disambiguate. 
+func sortDeterministic(results []*audittypes.StorageProofResult) { + sort.SliceStable(results, func(i, j int) bool { + a, b := results[i], results[j] + if a.BucketType != b.BucketType { + return a.BucketType < b.BucketType + } + return a.TicketId < b.TicketId + }) +} diff --git a/supernode/storage_challenge/result_buffer_test.go b/supernode/storage_challenge/result_buffer_test.go new file mode 100644 index 00000000..bfc58644 --- /dev/null +++ b/supernode/storage_challenge/result_buffer_test.go @@ -0,0 +1,325 @@ +package storage_challenge + +import ( + "fmt" + "reflect" + "sync" + "testing" + + audittypes "github.com/LumeraProtocol/lumera/x/audit/v1/types" +) + +const ( + bucketRecent = audittypes.StorageProofBucketType_STORAGE_PROOF_BUCKET_TYPE_RECENT + bucketOld = audittypes.StorageProofBucketType_STORAGE_PROOF_BUCKET_TYPE_OLD +) + +func mkResult(bucket audittypes.StorageProofBucketType, ticket string) *audittypes.StorageProofResult { + return &audittypes.StorageProofResult{ + TicketId: ticket, + BucketType: bucket, + } +} + +func mkResultForTarget(bucket audittypes.StorageProofBucketType, ticket, target string) *audittypes.StorageProofResult { + return &audittypes.StorageProofResult{ + TicketId: ticket, + BucketType: bucket, + TargetSupernodeAccount: target, + } +} + +// ticketIDsOf extracts ticket IDs in slice order. +func ticketIDsOf(rs []*audittypes.StorageProofResult) []string { + out := make([]string, len(rs)) + for i, r := range rs { + out[i] = r.TicketId + } + return out +} + +func TestBuffer_BelowCap_ReturnsAllSortedDeterministically(t *testing.T) { + b := NewBuffer() + // Append in scrambled order; expect sort by (BucketType, TicketId). 
+ inputs := []*audittypes.StorageProofResult{ + mkResult(bucketOld, "t-old-b"), + mkResult(bucketRecent, "t-recent-c"), + mkResult(bucketRecent, "t-recent-a"), + mkResult(bucketOld, "t-old-a"), + } + for _, r := range inputs { + b.Append(5, r) + } + got := b.CollectResults(5) + if len(got) != 4 { + t.Fatalf("want 4 results, got %d", len(got)) + } + // RECENT (=1) sorts before OLD (=2) because lower numeric enum. + want := []string{"t-recent-a", "t-recent-c", "t-old-a", "t-old-b"} + if !reflect.DeepEqual(ticketIDsOf(got), want) { + t.Fatalf("ordering mismatch:\n got: %v\n want: %v", ticketIDsOf(got), want) + } + // Buffer drained for epoch 5. + if got2 := b.CollectResults(5); len(got2) != 0 { + t.Fatalf("expected drained buffer, got %d results", len(got2)) + } +} + +func TestBuffer_AboveCap_DropsNonRecentFirst(t *testing.T) { + b := NewBuffer() + // 10 RECENT + 8 OLD = 18 total, cap 16 → drop 2 OLD oldest. Kept: 10 R + 6 O. + for i := 0; i < 10; i++ { + b.Append(7, mkResult(bucketRecent, fmt.Sprintf("recent-%02d", i))) + } + for i := 0; i < 8; i++ { + b.Append(7, mkResult(bucketOld, fmt.Sprintf("old-%02d", i))) + } + got := b.CollectResults(7) + if len(got) != 16 { + t.Fatalf("want 16 results, got %d", len(got)) + } + var nRecent, nOld int + for _, r := range got { + switch r.BucketType { + case bucketRecent: + nRecent++ + case bucketOld: + nOld++ + } + } + if nRecent != 10 || nOld != 6 { + t.Fatalf("want 10 RECENT + 6 OLD, got %d RECENT + %d OLD", nRecent, nOld) + } + // The two oldest OLD entries by ticket_id ("old-00", "old-01") must be the dropped ones. + for _, r := range got { + if r.TicketId == "old-00" || r.TicketId == "old-01" { + t.Fatalf("expected oldest OLD entries dropped; %q present", r.TicketId) + } + } +} + +func TestBuffer_AboveCap_OnlyRecent_DropsOldest(t *testing.T) { + b := NewBuffer() + // 20 RECENT, cap 16 → drop 4 oldest by ticket_id lex. 
+ for i := 0; i < 20; i++ { + b.Append(9, mkResult(bucketRecent, fmt.Sprintf("r-%02d", i))) + } + got := b.CollectResults(9) + if len(got) != 16 { + t.Fatalf("want 16 results, got %d", len(got)) + } + want := []string{ + "r-04", "r-05", "r-06", "r-07", "r-08", "r-09", + "r-10", "r-11", "r-12", "r-13", "r-14", "r-15", + "r-16", "r-17", "r-18", "r-19", + } + if !reflect.DeepEqual(ticketIDsOf(got), want) { + t.Fatalf("ordering mismatch:\n got: %v\n want: %v", ticketIDsOf(got), want) + } +} + +func TestBuffer_DeterministicSorting(t *testing.T) { + build := func() []*audittypes.StorageProofResult { + b := NewBuffer() + // Mix and match in a deliberately scrambled order. + seqs := []*audittypes.StorageProofResult{ + mkResult(bucketOld, "ticket-z"), + mkResult(bucketRecent, "ticket-m"), + mkResult(bucketOld, "ticket-a"), + mkResult(bucketRecent, "ticket-b"), + mkResult(bucketRecent, "ticket-aa"), + mkResult(bucketOld, "ticket-c"), + } + for _, r := range seqs { + b.Append(11, r) + } + return b.CollectResults(11) + } + a := ticketIDsOf(build()) + c := ticketIDsOf(build()) + if !reflect.DeepEqual(a, c) { + t.Fatalf("non-deterministic output:\n run1: %v\n run2: %v", a, c) + } +} + +func TestBuffer_ConcurrentAppendDrain(t *testing.T) { + b := NewBuffer() + const writers = 8 + const perWriter = 50 + + var wg sync.WaitGroup + wg.Add(writers) + for w := 0; w < writers; w++ { + go func(w int) { + defer wg.Done() + for i := 0; i < perWriter; i++ { + bucket := bucketRecent + if i%3 == 0 { + bucket = bucketOld + } + b.Append(13, mkResult(bucket, fmt.Sprintf("w%d-i%03d", w, i))) + } + }(w) + } + + // Concurrent drainer racing with writers — also exercises the lock under -race. + done := make(chan struct{}) + go func() { + for { + select { + case <-done: + return + default: + _ = b.CollectResults(13) + } + } + }() + + wg.Wait() + close(done) + // Drain leftover (whatever the racing collector didn't drain). + _ = b.CollectResults(13) + + // Buffer must be empty for the epoch. 
+ if got := b.CollectResults(13); len(got) != 0 { + t.Fatalf("expected empty buffer after final drain, got %d", len(got)) + } +} + +// TestBuffer_FullModeAssignedTargetCoverageBelowCap is the LEP-6 v3-plan PR3 +// item-5 invariant guard: when the dispatcher emits the realistic chain-bound +// workload (one assigned target → one RECENT + one OLD per epoch, far under +// the 16-result cap), the buffer MUST surface both bucket entries for that +// target untouched. This is the only path that runs in production today +// because chain-side AssignTargets returns at most one target per epoch. +// +// Note: the throttle policy ("drop non-RECENT first") does NOT preserve +// per-target RECENT+OLD coverage if the buffer ever exceeds cap. That is +// intentional and acceptable here because the dispatcher is structurally +// bounded to ≤2 emissions per assigned target. If a future change widens +// emissions (e.g. multiple assigned targets per epoch), the throttle policy +// must be revisited — see TestBuffer_OverCap_DropPolicyIsNotTargetAware +// below for the explicit pin of current behavior. +func TestBuffer_FullModeAssignedTargetCoverageBelowCap(t *testing.T) { + const target = "lumera1assignedtarget000000000000000000target" + b := NewBuffer() + + // Realistic FULL-mode emission: one RECENT + one OLD for the assigned + // target, plus a small amount of unrelated-target carryover (e.g. from + // a parallel challenger run for a different epoch slice). 
+ b.Append(42, mkResultForTarget(bucketRecent, "ticket-recent-A", target)) + b.Append(42, mkResultForTarget(bucketOld, "ticket-old-A", target)) + b.Append(42, mkResultForTarget(bucketRecent, "ticket-recent-other", "lumera1other00000000000000000000000000other")) + b.Append(42, mkResultForTarget(bucketOld, "ticket-old-other", "lumera1other00000000000000000000000000other")) + + got := b.CollectResults(42) + if len(got) != 4 { + t.Fatalf("want 4 results below cap, got %d", len(got)) + } + + var sawTargetRecent, sawTargetOld bool + for _, r := range got { + if r.TargetSupernodeAccount != target { + continue + } + switch r.BucketType { + case bucketRecent: + if sawTargetRecent { + t.Fatalf("duplicate RECENT for assigned target") + } + sawTargetRecent = true + case bucketOld: + if sawTargetOld { + t.Fatalf("duplicate OLD for assigned target") + } + sawTargetOld = true + } + } + if !sawTargetRecent { + t.Fatalf("FULL coverage invariant violated: assigned target RECENT entry missing from CollectResults output") + } + if !sawTargetOld { + t.Fatalf("FULL coverage invariant violated: assigned target OLD entry missing from CollectResults output") + } +} + +// TestBuffer_OverCap_DropPolicyIsNotTargetAware pins the documented limitation +// of the current throttle: "drop non-RECENT first" is target-blind, so an +// assigned target's OLD entry CAN be dropped if the buffer ever exceeds 16. +// This is acceptable today because the dispatcher cannot realistically push +// the buffer over cap (chain assigns ≤1 target/epoch → ≤2 emissions). If this +// invariant ever changes, this test will catch the silent regression and force +// a target-aware throttle revision (see LEP-6 v3 plan §3 PR3 item 6, deferred +// to PR-4 ownership for heal-op driven multi-target scenarios). 
+func TestBuffer_OverCap_DropPolicyIsNotTargetAware(t *testing.T) { + const assignedTarget = "lumera1assignedtarget000000000000000000target" + const otherTarget = "lumera1other00000000000000000000000000other" + + b := NewBuffer() + // 14 RECENT for unrelated target + 1 RECENT + 1 OLD + 1 OLD (filler) for + // assigned target = 17 total → throttle drops 1 non-RECENT (oldest by + // ticket_id lex). The assigned target's OLD entry is at risk if its + // ticket_id sorts earlier than the filler's. + for i := 0; i < 14; i++ { + b.Append(99, mkResultForTarget(bucketRecent, fmt.Sprintf("other-recent-%02d", i), otherTarget)) + } + b.Append(99, mkResultForTarget(bucketRecent, "assigned-recent-A", assignedTarget)) + b.Append(99, mkResultForTarget(bucketOld, "assigned-old-A", assignedTarget)) + b.Append(99, mkResultForTarget(bucketOld, "filler-old-zzz", otherTarget)) + + got := b.CollectResults(99) + if len(got) != 16 { + t.Fatalf("want 16 (cap), got %d", len(got)) + } + + // Document current behavior: dropped one OLD by lex order. Either + // "assigned-old-A" or "filler-old-zzz" survives — current "drop oldest + // non-RECENT by ticket_id lex" implementation drops "assigned-old-A" + // because it sorts before "filler-old-zzz". This is the behavior pin — + // if a future change makes throttle target-aware (preserve assigned-target + // coverage even over cap), update this test accordingly. 
+ var assignedOldKept, fillerOldKept bool + for _, r := range got { + switch r.TicketId { + case "assigned-old-A": + assignedOldKept = true + case "filler-old-zzz": + fillerOldKept = true + } + } + if assignedOldKept { + t.Fatalf("throttle became target-aware (kept assigned-target OLD) — update test or note the policy change") + } + if !fillerOldKept { + t.Fatalf("expected filler-old-zzz to survive (lex-greater non-RECENT survives drop-oldest policy); got dropped") + } +} + +func TestBuffer_PerEpochIsolation(t *testing.T) { + b := NewBuffer() + b.Append(5, mkResult(bucketRecent, "e5-a")) + b.Append(5, mkResult(bucketOld, "e5-b")) + b.Append(6, mkResult(bucketRecent, "e6-a")) + b.Append(6, mkResult(bucketOld, "e6-b")) + + got5 := b.CollectResults(5) + if len(got5) != 2 { + t.Fatalf("epoch 5: want 2, got %d", len(got5)) + } + for _, r := range got5 { + if r.TicketId != "e5-a" && r.TicketId != "e5-b" { + t.Fatalf("epoch 5 leaked foreign ticket %q", r.TicketId) + } + } + + // Epoch 6 must remain intact. + got6 := b.CollectResults(6) + if len(got6) != 2 { + t.Fatalf("epoch 6 lost data: want 2, got %d", len(got6)) + } + for _, r := range got6 { + if r.TicketId != "e6-a" && r.TicketId != "e6-b" { + t.Fatalf("epoch 6 leaked foreign ticket %q", r.TicketId) + } + } +} diff --git a/supernode/storage_challenge/service.go b/supernode/storage_challenge/service.go index b2227de0..5f3b7e06 100644 --- a/supernode/storage_challenge/service.go +++ b/supernode/storage_challenge/service.go @@ -69,6 +69,20 @@ type Service struct { grpcClient *grpcclient.Client grpcOpts *grpcclient.ClientOptions + + // lep6 is the LEP-6 compound storage challenge dispatcher. Optional: + // if nil the legacy fixed-range path is the only active flow. When + // non-nil, the dispatcher runs once per new epoch in addition to the + // legacy loop. Mode gating (UNSPECIFIED skips) lives inside + // LEP6Dispatcher.DispatchEpoch. 
+ lep6 *LEP6Dispatcher +} + +// SetLEP6Dispatcher attaches the LEP-6 compound-challenge dispatcher. +// May be called once before Run; nil-safe at the call site (Run skips +// LEP-6 work when the field is nil). +func (s *Service) SetLEP6Dispatcher(d *LEP6Dispatcher) { + s.lep6 = d } type Config struct { @@ -253,6 +267,20 @@ func (s *Service) Run(ctx context.Context) error { continue } + // LEP-6 compound dispatch runs alongside the legacy single-range + // challenge for forward compatibility. The dispatcher gates + // internally on StorageTruthEnforcementMode (UNSPECIFIED skips), + // so it is dormant under chains that have not enabled storage + // truth enforcement and a no-op cost otherwise. + if s.lep6 != nil { + if err := s.lep6.DispatchEpoch(ctx, epochID); err != nil { + logtrace.Warn(ctx, "lep6 dispatch error", logtrace.Fields{ + "epoch_id": epochID, + "error": err.Error(), + }) + } + } + lastRunEpoch = epochID lastRunOK = true } diff --git a/supernode/storage_challenge/ticket_provider.go b/supernode/storage_challenge/ticket_provider.go new file mode 100644 index 00000000..4b647596 --- /dev/null +++ b/supernode/storage_challenge/ticket_provider.go @@ -0,0 +1,85 @@ +package storage_challenge + +import ( + "context" + "sort" + "strings" + + actiontypes "github.com/LumeraProtocol/lumera/x/action/v1/types" + "github.com/LumeraProtocol/supernode/v2/pkg/lumera" +) + +// ChainTicketProvider discovers finalized cascade actions assigned to a target +// supernode via the final Lumera action query API. It is intentionally small: +// the dispatcher only needs ticket/action IDs and their register-time block +// heights for LEP-6 bucket classification. +type ChainTicketProvider struct { + client lumera.Client +} + +// NewChainTicketProvider constructs a production TicketProvider backed by +// x/action ListActionsBySuperNode. 
+func NewChainTicketProvider(client lumera.Client) *ChainTicketProvider { + return &ChainTicketProvider{client: client} +} + +// TicketsForTarget returns finalized cascade actions that include the target +// supernode in their action.SuperNodes assignment list. +func (p *ChainTicketProvider) TicketsForTarget(ctx context.Context, targetSupernodeAccount string) ([]TicketDescriptor, error) { + if p == nil || p.client == nil || p.client.Action() == nil { + return nil, nil + } + target := strings.TrimSpace(targetSupernodeAccount) + if target == "" { + return nil, nil + } + + resp, err := p.client.Action().ListActionsBySuperNode(ctx, target) + if err != nil || resp == nil { + return nil, err + } + + out := make([]TicketDescriptor, 0, len(resp.Actions)) + seen := make(map[string]struct{}, len(resp.Actions)) + for _, act := range resp.Actions { + if !isEligibleCascadeAction(act, target) { + continue + } + id := strings.TrimSpace(act.ActionID) + if id == "" { + continue + } + if _, ok := seen[id]; ok { + continue + } + seen[id] = struct{}{} + out = append(out, TicketDescriptor{TicketID: id, AnchorBlock: act.BlockHeight}) + } + + sort.Slice(out, func(i, j int) bool { return out[i].TicketID < out[j].TicketID }) + return out, nil +} + +func isEligibleCascadeAction(act *actiontypes.Action, target string) bool { + if act == nil { + return false + } + if act.ActionType != actiontypes.ActionTypeCascade { + return false + } + // LEP-6 challenges storage only after cascade finalization. Lumera marks + // finalized/approved actions as DONE/APPROVED depending on the workflow + // phase; reject pending/processing/rejected/failed/expired actions. 
+ if act.State != actiontypes.ActionStateDone && act.State != actiontypes.ActionStateApproved { + return false + } + if act.BlockHeight <= 0 { + return false + } + for _, sn := range act.SuperNodes { + if strings.TrimSpace(sn) == target { + return true + } + } + return false +} diff --git a/supernode/storage_challenge/ticket_provider_test.go b/supernode/storage_challenge/ticket_provider_test.go new file mode 100644 index 00000000..90e4d311 --- /dev/null +++ b/supernode/storage_challenge/ticket_provider_test.go @@ -0,0 +1,42 @@ +package storage_challenge + +import ( + "context" + "testing" + + actiontypes "github.com/LumeraProtocol/lumera/x/action/v1/types" + lumeraMock "github.com/LumeraProtocol/supernode/v2/pkg/lumera" + actionmod "github.com/LumeraProtocol/supernode/v2/pkg/lumera/modules/action" + "go.uber.org/mock/gomock" +) + +func TestChainTicketProviderFiltersFinalizedCascadeActions(t *testing.T) { + ctrl := gomock.NewController(t) + client := lumeraMock.NewMockClient(ctrl) + actions := actionmod.NewMockModule(ctrl) + + client.EXPECT().Action().Return(actions).Times(2) + actions.EXPECT().ListActionsBySuperNode(gomock.Any(), "sn-target").Return(&actiontypes.QueryListActionsBySuperNodeResponse{Actions: []*actiontypes.Action{ + {ActionID: "sym-old", ActionType: actiontypes.ActionTypeCascade, State: actiontypes.ActionStateDone, BlockHeight: 99, SuperNodes: []string{"sn-target"}}, + {ActionID: "sym-approved", ActionType: actiontypes.ActionTypeCascade, State: actiontypes.ActionStateApproved, BlockHeight: 100, SuperNodes: []string{"sn-target"}}, + {ActionID: "sym-old", ActionType: actiontypes.ActionTypeCascade, State: actiontypes.ActionStateDone, BlockHeight: 99, SuperNodes: []string{"sn-target"}}, // duplicate + {ActionID: "pending", ActionType: actiontypes.ActionTypeCascade, State: actiontypes.ActionStatePending, BlockHeight: 101, SuperNodes: []string{"sn-target"}}, + {ActionID: "wrong-type", ActionType: actiontypes.ActionTypeSense, State: 
actiontypes.ActionStateDone, BlockHeight: 102, SuperNodes: []string{"sn-target"}}, + {ActionID: "wrong-target", ActionType: actiontypes.ActionTypeCascade, State: actiontypes.ActionStateDone, BlockHeight: 103, SuperNodes: []string{"other"}}, + {ActionID: "zero-height", ActionType: actiontypes.ActionTypeCascade, State: actiontypes.ActionStateDone, BlockHeight: 0, SuperNodes: []string{"sn-target"}}, + }}, nil) + + got, err := NewChainTicketProvider(client).TicketsForTarget(context.Background(), "sn-target") + if err != nil { + t.Fatalf("TicketsForTarget returned error: %v", err) + } + if len(got) != 2 { + t.Fatalf("want 2 eligible tickets, got %d: %#v", len(got), got) + } + if got[0].TicketID != "sym-approved" || got[0].AnchorBlock != 100 { + t.Fatalf("first sorted ticket mismatch: %#v", got[0]) + } + if got[1].TicketID != "sym-old" || got[1].AnchorBlock != 99 { + t.Fatalf("second sorted ticket mismatch: %#v", got[1]) + } +} diff --git a/supernode/transport/grpc/storage_challenge/handler.go b/supernode/transport/grpc/storage_challenge/handler.go index 34d4d5ba..0bb91208 100644 --- a/supernode/transport/grpc/storage_challenge/handler.go +++ b/supernode/transport/grpc/storage_challenge/handler.go @@ -8,28 +8,63 @@ import ( "strings" "time" + audittypes "github.com/LumeraProtocol/lumera/x/audit/v1/types" "github.com/LumeraProtocol/supernode/v2/gen/supernode" "github.com/LumeraProtocol/supernode/v2/p2p" + snkeyring "github.com/LumeraProtocol/supernode/v2/pkg/keyring" "github.com/LumeraProtocol/supernode/v2/pkg/logtrace" "github.com/LumeraProtocol/supernode/v2/pkg/storage/queries" + "github.com/LumeraProtocol/supernode/v2/pkg/storagechallenge/deterministic" "github.com/LumeraProtocol/supernode/v2/pkg/types" + "github.com/cosmos/cosmos-sdk/crypto/keyring" "lukechampine.com/blake3" ) const maxServedSliceBytes = uint64(65_536) +// ArtifactReader is the recipient-side abstraction over cascade artifact storage +// used to satisfy LEP-6 multi-range compound storage challenges. 
The B.3 wiring +// will provide a cascade-module-backed implementation; tests inject their own. +type ArtifactReader interface { + ReadArtifactRange(ctx context.Context, class audittypes.StorageProofArtifactClass, key string, start, end uint64) ([]byte, error) +} + type Server struct { supernode.UnimplementedStorageChallengeServiceServer identity string p2p p2p.Client store queries.LocalStoreInterface + reader ArtifactReader + + // keyring + keyName are used to sign LEP-6 GetCompoundProof responses + // (recipient_signature) over the response transcript hash. Both may + // remain unset for legacy / test paths; signing is then skipped and + // recipient_signature stays empty. + keyring keyring.Keyring + keyName string } func NewServer(identity string, p2pClient p2p.Client, store queries.LocalStoreInterface) *Server { return &Server{identity: identity, p2p: p2pClient, store: store} } +// WithArtifactReader configures the server with the LEP-6 compound-challenge +// recipient-side reader. Returns the receiver for chained construction. +func (s *Server) WithArtifactReader(reader ArtifactReader) *Server { + s.reader = reader + return s +} + +// WithRecipientSigner configures the keyring + key name used to sign +// LEP-6 GetCompoundProof response transcripts. Returns the receiver for +// chained construction. +func (s *Server) WithRecipientSigner(kr keyring.Keyring, keyName string) *Server { + s.keyring = kr + s.keyName = keyName + return s +} + func (s *Server) GetSliceProof(ctx context.Context, req *supernode.GetSliceProofRequest) (*supernode.GetSliceProofResponse, error) { if req == nil { return nil, fmt.Errorf("nil request") @@ -238,3 +273,129 @@ func (s *Server) persistObserverVerification(ctx context.Context, req *supernode "ok": resp.Ok, }) } + +// GetCompoundProof serves a LEP-6 multi-range compound storage challenge. 
+// The challenger derives range count and range size from chain params; the +// recipient therefore validates only request-level structural invariants rather +// than re-asserting local compile-time defaults. It reads the requested ranges +// via the injected ArtifactReader, computes a BLAKE3 hash over the +// concatenation, and returns range_bytes alongside the proof hash. +func (s *Server) GetCompoundProof(ctx context.Context, req *supernode.GetCompoundProofRequest) (*supernode.GetCompoundProofResponse, error) { + if req == nil { + return nil, fmt.Errorf("nil request") + } + + resp := &supernode.GetCompoundProofResponse{ + ChallengeId: req.ChallengeId, + EpochId: req.EpochId, + TicketId: req.TicketId, + ArtifactClass: req.ArtifactClass, + ArtifactOrdinal: req.ArtifactOrdinal, + BucketType: req.BucketType, + ArtifactKey: req.ArtifactKey, + } + + if req.ChallengeId == "" { + resp.Error = "challenge_id is required" + return resp, nil + } + if req.EpochId == 0 { + resp.Error = "epoch_id must be > 0" + return resp, nil + } + if req.TicketId == "" { + resp.Error = "ticket_id is required" + return resp, nil + } + if len(req.Ranges) == 0 { + resp.Error = "at least one range is required" + return resp, nil + } + var requestRangeLen uint64 + for i, rng := range req.Ranges { + if rng == nil { + resp.Error = fmt.Sprintf("range[%d] is nil", i) + return resp, nil + } + if rng.End <= rng.Start { + resp.Error = fmt.Sprintf("range[%d] invalid: end (%d) must be > start (%d)", i, rng.End, rng.Start) + return resp, nil + } + size := rng.End - rng.Start + if i == 0 { + requestRangeLen = size + } else if size != requestRangeLen { + resp.Error = fmt.Sprintf("range[%d] invalid size: got %d, want %d from first range", i, size, requestRangeLen) + return resp, nil + } + if rng.End > req.ArtifactSize { + resp.Error = fmt.Sprintf("range[%d] out of bounds: end (%d) > artifact_size (%d)", i, rng.End, req.ArtifactSize) + return resp, nil + } + } + + if s.reader == nil { + resp.Error = 
"artifact reader not configured" + return resp, nil + } + + class := audittypes.StorageProofArtifactClass(req.ArtifactClass) + rangeBytes := make([][]byte, 0, len(req.Ranges)) + hasher := blake3.New(32, nil) + for i, rng := range req.Ranges { + buf, err := s.reader.ReadArtifactRange(ctx, class, req.ArtifactKey, rng.Start, rng.End) + if err != nil { + resp.Error = fmt.Sprintf("read range[%d] [%d,%d): %v", i, rng.Start, rng.End, err) + return resp, nil + } + rangeBytes = append(rangeBytes, buf) + _, _ = hasher.Write(buf) + } + sum := hasher.Sum(nil) + resp.RangeBytes = rangeBytes + resp.ProofHashHex = hex.EncodeToString(sum) + + // Sign the response transcript with the recipient's keyring identity. + // The transcript composition mirrors the challenger-side TranscriptHash + // composition (deterministic.TranscriptInputs) so the off-chain + // reporter can attach this signature to its StorageProofResult and + // the chain (post-LEP-6) can verify both endpoints corroborate the + // proof. Recipient acts here as the TARGET supernode. + if s.keyring != nil && strings.TrimSpace(s.keyName) != "" { + obs := append([]string(nil), req.ObserverAccounts...) 
+ offsets := make([]uint64, 0, len(req.Ranges)) + for _, rng := range req.Ranges { + offsets = append(offsets, rng.Start) + } + derivHash, hashErr := deterministic.DerivationInputHash(req.Seed, req.TargetSupernodeAccount, req.TicketId, class, req.ArtifactOrdinal, offsets, requestRangeLen) + if hashErr != nil { + resp.Error = fmt.Sprintf("derivation input hash: %v", hashErr) + return resp, nil + } + txHash, hashErr := deterministic.TranscriptHash(deterministic.TranscriptInputs{ + EpochID: req.EpochId, + ChallengerSupernodeAccount: req.ChallengerAccount, + TargetSupernodeAccount: req.TargetSupernodeAccount, + TicketID: req.TicketId, + Bucket: audittypes.StorageProofBucketType(req.BucketType), + ArtifactClass: class, + ArtifactOrdinal: req.ArtifactOrdinal, + ArtifactKey: req.ArtifactKey, + DerivationInputHash: derivHash, + CompoundProofHashHex: resp.ProofHashHex, + ObserverIDs: obs, + }) + if hashErr != nil { + resp.Error = fmt.Sprintf("transcript hash: %v", hashErr) + return resp, nil + } + sig, signErr := snkeyring.SignBytes(s.keyring, s.keyName, []byte(txHash)) + if signErr != nil { + resp.Error = fmt.Sprintf("recipient sign: %v", signErr) + return resp, nil + } + resp.RecipientSignature = hex.EncodeToString(sig) + } + resp.Ok = true + return resp, nil +} diff --git a/supernode/transport/grpc/storage_challenge/handler_compound_test.go b/supernode/transport/grpc/storage_challenge/handler_compound_test.go new file mode 100644 index 00000000..c925e4b5 --- /dev/null +++ b/supernode/transport/grpc/storage_challenge/handler_compound_test.go @@ -0,0 +1,279 @@ +package storage_challenge + +import ( + "context" + "encoding/binary" + "encoding/hex" + "io" + "strings" + "testing" + + audittypes "github.com/LumeraProtocol/lumera/x/audit/v1/types" + "github.com/LumeraProtocol/supernode/v2/gen/supernode" + "github.com/LumeraProtocol/supernode/v2/pkg/storagechallenge/deterministic" + "github.com/cosmos/cosmos-sdk/codec" + codectypes "github.com/cosmos/cosmos-sdk/codec/types" + 
cryptocodec "github.com/cosmos/cosmos-sdk/crypto/codec" + "github.com/cosmos/cosmos-sdk/crypto/hd" + "github.com/cosmos/cosmos-sdk/crypto/keyring" + "github.com/cosmos/go-bip39" + "github.com/stretchr/testify/require" + "lukechampine.com/blake3" +) + +// deterministicReader produces reproducible bytes derived from +// (class, key, start, end) so tests can assert exact proof hashes. +type deterministicReader struct { + calls int + err error +} + +func (r *deterministicReader) ReadArtifactRange(_ context.Context, class audittypes.StorageProofArtifactClass, key string, start, end uint64) ([]byte, error) { + r.calls++ + if r.err != nil { + return nil, r.err + } + out := make([]byte, end-start) + seed := make([]byte, 0, 32+len(key)) + var sb [4]byte + binary.BigEndian.PutUint32(sb[:], uint32(class)) + seed = append(seed, sb[:]...) + seed = append(seed, []byte(key)...) + var ab [16]byte + binary.BigEndian.PutUint64(ab[0:8], start) + binary.BigEndian.PutUint64(ab[8:16], end) + seed = append(seed, ab[:]...) 
+	h := blake3.New(int(end-start), nil)
+	_, _ = h.Write(seed)
+	copy(out, h.Sum(nil))
+	return out, nil
+}
+
+// compoundRequestWith builds a fully populated GetCompoundProofRequest using
+// fixed test identities; only the byte ranges and the artifact size vary per
+// test.
+func compoundRequestWith(ranges []*supernode.ByteRange, artifactSize uint64) *supernode.GetCompoundProofRequest {
+	return &supernode.GetCompoundProofRequest{
+		ChallengeId:            "challenge-c1",
+		EpochId:                42,
+		TicketId:               "ticket-1",
+		TargetSupernodeAccount: "sn-target",
+		ChallengerAccount:      "sn-challenger",
+		ObserverAccounts:       []string{"o1", "o2"},
+		ArtifactClass:          uint32(audittypes.StorageProofArtifactClass_STORAGE_PROOF_ARTIFACT_CLASS_SYMBOL),
+		ArtifactOrdinal:        3,
+		ArtifactCount:          8,
+		BucketType:             uint32(audittypes.StorageProofBucketType_STORAGE_PROOF_BUCKET_TYPE_RECENT),
+		ArtifactKey:            "artifact-key-1",
+		ArtifactSize:           artifactSize,
+		Ranges:                 ranges,
+	}
+}
+
+// fourValidRanges returns four non-overlapping ranges, each exactly
+// LEP6CompoundRangeLenBytes long, all well inside a 1 MiB artifact.
+func fourValidRanges() []*supernode.ByteRange {
+	rl := uint64(deterministic.LEP6CompoundRangeLenBytes)
+	return []*supernode.ByteRange{
+		{Start: 0, End: rl},
+		{Start: 1024, End: 1024 + rl},
+		{Start: 4096, End: 4096 + rl},
+		{Start: 8192, End: 8192 + rl},
+	}
+}
+
+// newCompoundProofKeyring returns an in-memory keyring holding one freshly
+// generated secp256k1 account plus the name it was stored under.
+func newCompoundProofKeyring(t *testing.T) (keyring.Keyring, string) {
+	t.Helper()
+	ir := codectypes.NewInterfaceRegistry()
+	cryptocodec.RegisterInterfaces(ir)
+	cdc := codec.NewProtoCodec(ir)
+	kr := keyring.NewInMemory(cdc)
+	entropy, err := bip39.NewEntropy(128)
+	require.NoError(t, err)
+	mnemonic, err := bip39.NewMnemonic(entropy)
+	require.NoError(t, err)
+	algos, _ := kr.SupportedAlgorithms()
+	algo, err := keyring.NewSigningAlgoFromString("secp256k1", algos)
+	require.NoError(t, err)
+	_, err = kr.NewAccount("recipient-test", mnemonic, "", hd.CreateHDPath(118, 0, 0).String(), algo)
+	require.NoError(t, err)
+	return kr, "recipient-test"
+}
+
+// TestGetCompoundProof_HappyPath exercises the success path: four valid
+// ranges over a 1 MiB artifact must yield Ok=true, one byte slice of the
+// expected length per range, one reader call per range, a proof hash over
+// the concatenated range bytes, and echoed request identifiers.
+func TestGetCompoundProof_HappyPath(t *testing.T) {
+	t.Parallel()
+
+	reader := &deterministicReader{}
+	srv := NewServer("recipient-1", &testP2PClient{}, nil).WithArtifactReader(reader)
+
+	req := compoundRequestWith(fourValidRanges(), 1<<20)
+	resp, err := srv.GetCompoundProof(context.Background(), req)
+	require.NoError(t, err)
+	require.NotNil(t, resp)
+	require.True(t, resp.Ok, "error: %s", resp.Error)
+	require.Empty(t, resp.Error)
+	require.Len(t, resp.RangeBytes, deterministic.LEP6CompoundRangesPerArtifact)
+	for i, b := range resp.RangeBytes {
+		require.Lenf(t, b, deterministic.LEP6CompoundRangeLenBytes, "range[%d]", i)
+	}
+	require.Equal(t, deterministic.LEP6CompoundRangesPerArtifact, reader.calls)
+
+	// ProofHashHex must be the hex-encoded 32-byte blake3 hash of the
+	// returned range bytes, concatenated in range order.
+	h := blake3.New(32, nil)
+	for _, b := range resp.RangeBytes {
+		_, _ = h.Write(b)
+	}
+	require.Equal(t, hex.EncodeToString(h.Sum(nil)), resp.ProofHashHex)
+	require.Empty(t, resp.RecipientSignature, "recipient signature deferred to B.3")
+	require.Equal(t, req.ChallengeId, resp.ChallengeId)
+	require.Equal(t, req.TicketId, resp.TicketId)
+	require.Equal(t, req.ArtifactKey, resp.ArtifactKey)
+}
+
+// TestGetCompoundProof_RecipientSignatureUsesDerivationInputHash verifies
+// that the recipient signature is produced over the transcript that embeds
+// the derivation-input hash: the signature must verify against the
+// transcript built with the recomputed derivation hash and must NOT verify
+// against an otherwise-identical transcript with an empty derivation hash.
+// NOTE(review): not marked t.Parallel() — confirm whether that is deliberate.
+func TestGetCompoundProof_RecipientSignatureUsesDerivationInputHash(t *testing.T) {
+	reader := &deterministicReader{}
+	kr, keyName := newCompoundProofKeyring(t)
+	srv := NewServer("recipient-1", &testP2PClient{}, nil).WithArtifactReader(reader).WithRecipientSigner(kr, keyName)
+
+	req := compoundRequestWith(fourValidRanges(), 1<<20)
+	req.Seed = []byte("0123456789abcdef0123456789abcdef")
+	resp, err := srv.GetCompoundProof(context.Background(), req)
+	require.NoError(t, err)
+	require.True(t, resp.Ok, "error: %s", resp.Error)
+	require.NotEmpty(t, resp.RecipientSignature)
+
+	// Recompute the derivation-input hash from the request's range offsets.
+	offsets := make([]uint64, 0, len(req.Ranges))
+	for _, rng := range req.Ranges {
+		offsets = append(offsets, rng.Start)
+	}
+	class := audittypes.StorageProofArtifactClass(req.ArtifactClass)
+	derivHash, err := deterministic.DerivationInputHash(req.Seed, req.TargetSupernodeAccount, req.TicketId, class, req.ArtifactOrdinal, offsets, uint64(deterministic.LEP6CompoundRangeLenBytes))
+	require.NoError(t, err)
+	expectedTranscript, err := deterministic.TranscriptHash(deterministic.TranscriptInputs{
+		EpochID:                    req.EpochId,
+		ChallengerSupernodeAccount: req.ChallengerAccount,
+		TargetSupernodeAccount:     req.TargetSupernodeAccount,
+		TicketID:                   req.TicketId,
+		Bucket:                     audittypes.StorageProofBucketType(req.BucketType),
+		ArtifactClass:              class,
+		ArtifactOrdinal:            req.ArtifactOrdinal,
+		ArtifactKey:                req.ArtifactKey,
+		DerivationInputHash:        derivHash,
+		CompoundProofHashHex:       resp.ProofHashHex,
+		ObserverIDs:                req.ObserverAccounts,
+	})
+	require.NoError(t, err)
+
+	// Negative control: same transcript but with no derivation hash.
+	emptyDerivTranscript, err := deterministic.TranscriptHash(deterministic.TranscriptInputs{
+		EpochID:                    req.EpochId,
+		ChallengerSupernodeAccount: req.ChallengerAccount,
+		TargetSupernodeAccount:     req.TargetSupernodeAccount,
+		TicketID:                   req.TicketId,
+		Bucket:                     audittypes.StorageProofBucketType(req.BucketType),
+		ArtifactClass:              class,
+		ArtifactOrdinal:            req.ArtifactOrdinal,
+		ArtifactKey:                req.ArtifactKey,
+		DerivationInputHash:        "",
+		CompoundProofHashHex:       resp.ProofHashHex,
+		ObserverIDs:                req.ObserverAccounts,
+	})
+	require.NoError(t, err)
+
+	sig, err := hex.DecodeString(resp.RecipientSignature)
+	require.NoError(t, err)
+	rec, err := kr.Key(keyName)
+	require.NoError(t, err)
+	pub, err := rec.GetPubKey()
+	require.NoError(t, err)
+	require.True(t, pub.VerifySignature([]byte(expectedTranscript), sig), "recipient signature must verify against transcript containing derivation hash")
+	require.False(t, pub.VerifySignature([]byte(emptyDerivTranscript), sig), "recipient signature must not verify against empty-derivation transcript")
+}
+
+// TestGetCompoundProof_AcceptsChainParamRangeCount checks the server accepts
+// a request whose range count (3) differs from the local default.
+func TestGetCompoundProof_AcceptsChainParamRangeCount(t *testing.T) {
+	t.Parallel()
+
+	reader := &deterministicReader{}
+	srv := NewServer("recipient-1", &testP2PClient{}, nil).WithArtifactReader(reader)
+
+	rng := fourValidRanges()[:3]
+	resp, err := srv.GetCompoundProof(context.Background(), compoundRequestWith(rng, 1<<20))
+	require.NoError(t, err)
+	require.True(t, resp.Ok, "error: %s", resp.Error)
+	require.Len(t, resp.RangeBytes, 3)
+	require.Equal(t, 3, reader.calls)
+}
+
+// TestGetCompoundProof_AcceptsChainParamRangeSize checks the server accepts
+// a uniform range size (200 bytes) that differs from the local default.
+func TestGetCompoundProof_AcceptsChainParamRangeSize(t *testing.T) {
+	t.Parallel()
+
+	reader := &deterministicReader{}
+	srv := NewServer("recipient-1", &testP2PClient{}, nil).WithArtifactReader(reader)
+
+	ranges := []*supernode.ByteRange{
+		{Start: 0, End: 200},
+		{Start: 1024, End: 1224},
+		{Start: 4096, End: 4296},
+		{Start: 8192, End: 8392},
+	}
+	resp, err := srv.GetCompoundProof(context.Background(), compoundRequestWith(ranges, 1<<20))
+	require.NoError(t, err)
+	require.True(t, resp.Ok, "error: %s", resp.Error)
+	require.Len(t, resp.RangeBytes, len(ranges))
+	for i, b := range resp.RangeBytes {
+		require.Lenf(t, b, 200, "range[%d]", i)
+	}
+	require.Equal(t, len(ranges), reader.calls)
+}
+
+// TestGetCompoundProof_RejectsInconsistentRangeSize checks that ranges of
+// differing lengths (200 vs 201 bytes) are rejected with Ok=false and an
+// "invalid size" error, returning no range bytes.
+func TestGetCompoundProof_RejectsInconsistentRangeSize(t *testing.T) {
+	t.Parallel()
+
+	srv := NewServer("recipient-1", &testP2PClient{}, nil).WithArtifactReader(&deterministicReader{})
+
+	bad := []*supernode.ByteRange{
+		{Start: 0, End: 200},
+		{Start: 1024, End: 1225},
+	}
+	resp, err := srv.GetCompoundProof(context.Background(), compoundRequestWith(bad, 1<<20))
+	require.NoError(t, err)
+	require.False(t, resp.Ok)
+	require.Contains(t, resp.Error, "invalid size")
+	require.Empty(t, resp.RangeBytes)
+}
+
+// TestGetCompoundProof_RejectsEmptyRanges checks that a request with no
+// ranges is rejected with an "at least one range" error.
+func TestGetCompoundProof_RejectsEmptyRanges(t *testing.T) {
+	t.Parallel()
+
+	srv := NewServer("recipient-1", &testP2PClient{}, nil).WithArtifactReader(&deterministicReader{})
+
+	resp, err := srv.GetCompoundProof(context.Background(), compoundRequestWith(nil, 1<<20))
+	require.NoError(t, err)
+	require.False(t, resp.Ok)
+	require.Contains(t, resp.Error, "at least one range")
+	require.Empty(t, resp.RangeBytes)
+}
+
+// TestGetCompoundProof_RangeOutOfBounds checks that a range extending one
+// byte past the declared artifact size is rejected with an "out of bounds"
+// error.
+func TestGetCompoundProof_RangeOutOfBounds(t *testing.T) {
+	t.Parallel()
+
+	srv := NewServer("recipient-1", &testP2PClient{}, nil).WithArtifactReader(&deterministicReader{})
+
+	rs := fourValidRanges()
+	// last range straddles end of artifact
+	artifactSize := rs[3].End - 1
+	resp, err := srv.GetCompoundProof(context.Background(), compoundRequestWith(rs, artifactSize))
+	require.NoError(t, err)
+	require.False(t, resp.Ok)
+	require.Contains(t, resp.Error, "out of bounds")
+	require.Empty(t, resp.RangeBytes)
+}
+
+// TestGetCompoundProof_ReaderError checks that a failing artifact reader
+// surfaces its error text in resp.Error (Ok=false) instead of returning a
+// transport error, and that no partial range bytes are returned.
+func TestGetCompoundProof_ReaderError(t *testing.T) {
+	t.Parallel()
+
+	reader := &deterministicReader{err: io.ErrUnexpectedEOF}
+	srv := NewServer("recipient-1", &testP2PClient{}, nil).WithArtifactReader(reader)
+
+	resp, err := srv.GetCompoundProof(context.Background(), compoundRequestWith(fourValidRanges(), 1<<20))
+	require.NoError(t, err)
+	require.False(t, resp.Ok)
+	require.True(t, strings.Contains(resp.Error, io.ErrUnexpectedEOF.Error()), "error %q must wrap %v", resp.Error, io.ErrUnexpectedEOF)
+	require.Empty(t, resp.RangeBytes)
+}