diff --git a/gen/supernode/action/cascade/service.pb.go b/gen/supernode/action/cascade/service.pb.go new file mode 100644 index 00000000..9b8cc5fb --- /dev/null +++ b/gen/supernode/action/cascade/service.pb.go @@ -0,0 +1,236 @@ +// Code generated by protoc-gen-go. DO NOT EDIT. +// versions: +// protoc-gen-go v1.36.5 +// protoc v5.29.3 +// source: proto/supernode/action/cascade/service.proto + +package cascade + +import ( + protoreflect "google.golang.org/protobuf/reflect/protoreflect" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" + reflect "reflect" + sync "sync" + unsafe "unsafe" +) + +const ( + // Verify that this generated code is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) + // Verify that runtime/protoimpl is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) + +type UploadInputDataRequest struct { + state protoimpl.MessageState `protogen:"open.v1"` + Filename string `protobuf:"bytes,1,opt,name=filename,proto3" json:"filename,omitempty"` + ActionId string `protobuf:"bytes,2,opt,name=action_id,json=actionId,proto3" json:"action_id,omitempty"` + DataHash string `protobuf:"bytes,3,opt,name=data_hash,json=dataHash,proto3" json:"data_hash,omitempty"` + RqIc int32 `protobuf:"varint,4,opt,name=rq_ic,json=rqIc,proto3" json:"rq_ic,omitempty"` + RqMax int32 `protobuf:"varint,5,opt,name=rq_max,json=rqMax,proto3" json:"rq_max,omitempty"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *UploadInputDataRequest) Reset() { + *x = UploadInputDataRequest{} + mi := &file_proto_supernode_action_cascade_service_proto_msgTypes[0] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *UploadInputDataRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*UploadInputDataRequest) ProtoMessage() {} + +func (x *UploadInputDataRequest) ProtoReflect() protoreflect.Message { + mi := &file_proto_supernode_action_cascade_service_proto_msgTypes[0] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use UploadInputDataRequest.ProtoReflect.Descriptor instead. 
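For readers skimming this generated file: protoc-gen-go emits plain structs plus a nil-safe getter for every field. A minimal sketch of building and reading this message (field values are illustrative only; the `cascade` import alias refers to this generated package):

```go
package main

import (
	"fmt"

	cascade "github.com/LumeraProtocol/supernode/gen/supernode/action/cascade"
)

func main() {
	req := &cascade.UploadInputDataRequest{
		Filename: "input.bin", // illustrative values only
		ActionId: "action-123",
		DataHash: "deadbeef",
		RqIc:     1,
		RqMax:    10,
	}
	fmt.Println(req.GetFilename(), req.GetRqMax()) // input.bin 10

	// Getters are nil-safe: calling them on a nil message returns the zero value.
	var empty *cascade.UploadInputDataRequest
	fmt.Println(empty.GetRqIc()) // 0
}
```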
+func (*UploadInputDataRequest) Descriptor() ([]byte, []int) { + return file_proto_supernode_action_cascade_service_proto_rawDescGZIP(), []int{0} +} + +func (x *UploadInputDataRequest) GetFilename() string { + if x != nil { + return x.Filename + } + return "" +} + +func (x *UploadInputDataRequest) GetActionId() string { + if x != nil { + return x.ActionId + } + return "" +} + +func (x *UploadInputDataRequest) GetDataHash() string { + if x != nil { + return x.DataHash + } + return "" +} + +func (x *UploadInputDataRequest) GetRqIc() int32 { + if x != nil { + return x.RqIc + } + return 0 +} + +func (x *UploadInputDataRequest) GetRqMax() int32 { + if x != nil { + return x.RqMax + } + return 0 +} + +type UploadInputDataResponse struct { + state protoimpl.MessageState `protogen:"open.v1"` + Success bool `protobuf:"varint,1,opt,name=success,proto3" json:"success,omitempty"` + Message string `protobuf:"bytes,2,opt,name=message,proto3" json:"message,omitempty"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *UploadInputDataResponse) Reset() { + *x = UploadInputDataResponse{} + mi := &file_proto_supernode_action_cascade_service_proto_msgTypes[1] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *UploadInputDataResponse) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*UploadInputDataResponse) ProtoMessage() {} + +func (x *UploadInputDataResponse) ProtoReflect() protoreflect.Message { + mi := &file_proto_supernode_action_cascade_service_proto_msgTypes[1] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use UploadInputDataResponse.ProtoReflect.Descriptor instead. 
+func (*UploadInputDataResponse) Descriptor() ([]byte, []int) { + return file_proto_supernode_action_cascade_service_proto_rawDescGZIP(), []int{1} +} + +func (x *UploadInputDataResponse) GetSuccess() bool { + if x != nil { + return x.Success + } + return false +} + +func (x *UploadInputDataResponse) GetMessage() string { + if x != nil { + return x.Message + } + return "" +} + +var File_proto_supernode_action_cascade_service_proto protoreflect.FileDescriptor + +var file_proto_supernode_action_cascade_service_proto_rawDesc = string([]byte{ + 0x0a, 0x2c, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2f, 0x73, 0x75, 0x70, 0x65, 0x72, 0x6e, 0x6f, 0x64, + 0x65, 0x2f, 0x61, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x2f, 0x63, 0x61, 0x73, 0x63, 0x61, 0x64, 0x65, + 0x2f, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x07, + 0x63, 0x61, 0x73, 0x63, 0x61, 0x64, 0x65, 0x22, 0x9a, 0x01, 0x0a, 0x16, 0x55, 0x70, 0x6c, 0x6f, + 0x61, 0x64, 0x49, 0x6e, 0x70, 0x75, 0x74, 0x44, 0x61, 0x74, 0x61, 0x52, 0x65, 0x71, 0x75, 0x65, + 0x73, 0x74, 0x12, 0x1a, 0x0a, 0x08, 0x66, 0x69, 0x6c, 0x65, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, + 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x66, 0x69, 0x6c, 0x65, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x1b, + 0x0a, 0x09, 0x61, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x69, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, + 0x09, 0x52, 0x08, 0x61, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x49, 0x64, 0x12, 0x1b, 0x0a, 0x09, 0x64, + 0x61, 0x74, 0x61, 0x5f, 0x68, 0x61, 0x73, 0x68, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, + 0x64, 0x61, 0x74, 0x61, 0x48, 0x61, 0x73, 0x68, 0x12, 0x13, 0x0a, 0x05, 0x72, 0x71, 0x5f, 0x69, + 0x63, 0x18, 0x04, 0x20, 0x01, 0x28, 0x05, 0x52, 0x04, 0x72, 0x71, 0x49, 0x63, 0x12, 0x15, 0x0a, + 0x06, 0x72, 0x71, 0x5f, 0x6d, 0x61, 0x78, 0x18, 0x05, 0x20, 0x01, 0x28, 0x05, 0x52, 0x05, 0x72, + 0x71, 0x4d, 0x61, 0x78, 0x22, 0x4d, 0x0a, 0x17, 0x55, 0x70, 0x6c, 0x6f, 0x61, 0x64, 0x49, 0x6e, + 0x70, 0x75, 0x74, 0x44, 0x61, 0x74, 0x61, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, + 0x18, 0x0a, 0x07, 0x73, 0x75, 0x63, 0x63, 0x65, 0x73, 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, 0x08, + 0x52, 0x07, 0x73, 0x75, 0x63, 0x63, 0x65, 0x73, 0x73, 0x12, 0x18, 0x0a, 0x07, 0x6d, 0x65, 0x73, + 0x73, 0x61, 0x67, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, 0x6d, 0x65, 0x73, 0x73, + 0x61, 0x67, 0x65, 0x32, 0x66, 0x0a, 0x0e, 0x43, 0x61, 0x73, 0x63, 0x61, 0x64, 0x65, 0x53, 0x65, + 0x72, 0x76, 0x69, 0x63, 0x65, 0x12, 0x54, 0x0a, 0x0f, 0x55, 0x70, 0x6c, 0x6f, 0x61, 0x64, 0x49, + 0x6e, 0x70, 0x75, 0x74, 0x44, 0x61, 0x74, 0x61, 0x12, 0x1f, 0x2e, 0x63, 0x61, 0x73, 0x63, 0x61, + 0x64, 0x65, 0x2e, 0x55, 0x70, 0x6c, 0x6f, 0x61, 0x64, 0x49, 0x6e, 0x70, 0x75, 0x74, 0x44, 0x61, + 0x74, 0x61, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x20, 0x2e, 0x63, 0x61, 0x73, 0x63, + 0x61, 0x64, 0x65, 0x2e, 0x55, 0x70, 0x6c, 0x6f, 0x61, 0x64, 0x49, 0x6e, 0x70, 0x75, 0x74, 0x44, + 0x61, 0x74, 0x61, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x42, 0x42, 0x5a, 0x40, 0x67, + 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x4c, 0x75, 0x6d, 0x65, 0x72, 0x61, + 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x6f, 0x6c, 0x2f, 0x73, 0x75, 0x70, 0x65, 0x72, 0x6e, 0x6f, + 0x64, 0x65, 0x2f, 0x67, 0x65, 0x6e, 0x2f, 0x73, 0x75, 0x70, 0x65, 0x72, 0x6e, 0x6f, 0x64, 0x65, + 0x2f, 0x61, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x2f, 0x63, 0x61, 0x73, 0x63, 0x61, 0x64, 0x65, 0x62, + 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, +}) + +var ( + file_proto_supernode_action_cascade_service_proto_rawDescOnce sync.Once + 
file_proto_supernode_action_cascade_service_proto_rawDescData []byte +) + +func file_proto_supernode_action_cascade_service_proto_rawDescGZIP() []byte { + file_proto_supernode_action_cascade_service_proto_rawDescOnce.Do(func() { + file_proto_supernode_action_cascade_service_proto_rawDescData = protoimpl.X.CompressGZIP(unsafe.Slice(unsafe.StringData(file_proto_supernode_action_cascade_service_proto_rawDesc), len(file_proto_supernode_action_cascade_service_proto_rawDesc))) + }) + return file_proto_supernode_action_cascade_service_proto_rawDescData +} + +var file_proto_supernode_action_cascade_service_proto_msgTypes = make([]protoimpl.MessageInfo, 2) +var file_proto_supernode_action_cascade_service_proto_goTypes = []any{ + (*UploadInputDataRequest)(nil), // 0: cascade.UploadInputDataRequest + (*UploadInputDataResponse)(nil), // 1: cascade.UploadInputDataResponse +} +var file_proto_supernode_action_cascade_service_proto_depIdxs = []int32{ + 0, // 0: cascade.CascadeService.UploadInputData:input_type -> cascade.UploadInputDataRequest + 1, // 1: cascade.CascadeService.UploadInputData:output_type -> cascade.UploadInputDataResponse + 1, // [1:2] is the sub-list for method output_type + 0, // [0:1] is the sub-list for method input_type + 0, // [0:0] is the sub-list for extension type_name + 0, // [0:0] is the sub-list for extension extendee + 0, // [0:0] is the sub-list for field type_name +} + +func init() { file_proto_supernode_action_cascade_service_proto_init() } +func file_proto_supernode_action_cascade_service_proto_init() { + if File_proto_supernode_action_cascade_service_proto != nil { + return + } + type x struct{} + out := protoimpl.TypeBuilder{ + File: protoimpl.DescBuilder{ + GoPackagePath: reflect.TypeOf(x{}).PkgPath(), + RawDescriptor: unsafe.Slice(unsafe.StringData(file_proto_supernode_action_cascade_service_proto_rawDesc), len(file_proto_supernode_action_cascade_service_proto_rawDesc)), + NumEnums: 0, + NumMessages: 2, + NumExtensions: 0, + NumServices: 1, + }, + GoTypes: file_proto_supernode_action_cascade_service_proto_goTypes, + DependencyIndexes: file_proto_supernode_action_cascade_service_proto_depIdxs, + MessageInfos: file_proto_supernode_action_cascade_service_proto_msgTypes, + }.Build() + File_proto_supernode_action_cascade_service_proto = out.File + file_proto_supernode_action_cascade_service_proto_goTypes = nil + file_proto_supernode_action_cascade_service_proto_depIdxs = nil +} diff --git a/gen/supernode/action/cascade/service_grpc.pb.go b/gen/supernode/action/cascade/service_grpc.pb.go new file mode 100644 index 00000000..f2bd9030 --- /dev/null +++ b/gen/supernode/action/cascade/service_grpc.pb.go @@ -0,0 +1,121 @@ +// Code generated by protoc-gen-go-grpc. DO NOT EDIT. +// versions: +// - protoc-gen-go-grpc v1.5.1 +// - protoc v5.29.3 +// source: proto/supernode/action/cascade/service.proto + +package cascade + +import ( + context "context" + grpc "google.golang.org/grpc" + codes "google.golang.org/grpc/codes" + status "google.golang.org/grpc/status" +) + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the grpc package it is being compiled against. +// Requires gRPC-Go v1.64.0 or later. +const _ = grpc.SupportPackageIsVersion9 + +const ( + CascadeService_UploadInputData_FullMethodName = "/cascade.CascadeService/UploadInputData" +) + +// CascadeServiceClient is the client API for CascadeService service. 
+//
+// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://pkg.go.dev/google.golang.org/grpc/?tab=doc#ClientConn.NewStream.
+type CascadeServiceClient interface {
+	UploadInputData(ctx context.Context, in *UploadInputDataRequest, opts ...grpc.CallOption) (*UploadInputDataResponse, error)
+}
+
+type cascadeServiceClient struct {
+	cc grpc.ClientConnInterface
+}
+
+func NewCascadeServiceClient(cc grpc.ClientConnInterface) CascadeServiceClient {
+	return &cascadeServiceClient{cc}
+}
+
+func (c *cascadeServiceClient) UploadInputData(ctx context.Context, in *UploadInputDataRequest, opts ...grpc.CallOption) (*UploadInputDataResponse, error) {
+	cOpts := append([]grpc.CallOption{grpc.StaticMethod()}, opts...)
+	out := new(UploadInputDataResponse)
+	err := c.cc.Invoke(ctx, CascadeService_UploadInputData_FullMethodName, in, out, cOpts...)
+	if err != nil {
+		return nil, err
+	}
+	return out, nil
+}
+
+// CascadeServiceServer is the server API for CascadeService service.
+// All implementations must embed UnimplementedCascadeServiceServer
+// for forward compatibility.
+type CascadeServiceServer interface {
+	UploadInputData(context.Context, *UploadInputDataRequest) (*UploadInputDataResponse, error)
+	mustEmbedUnimplementedCascadeServiceServer()
+}
+
+// UnimplementedCascadeServiceServer must be embedded to have
+// forward compatible implementations.
+//
+// NOTE: this should be embedded by value instead of pointer to avoid a nil
+// pointer dereference when methods are called.
+type UnimplementedCascadeServiceServer struct{}
+
+func (UnimplementedCascadeServiceServer) UploadInputData(context.Context, *UploadInputDataRequest) (*UploadInputDataResponse, error) {
+	return nil, status.Errorf(codes.Unimplemented, "method UploadInputData not implemented")
+}
+func (UnimplementedCascadeServiceServer) mustEmbedUnimplementedCascadeServiceServer() {}
+func (UnimplementedCascadeServiceServer) testEmbeddedByValue()                        {}
+
+// UnsafeCascadeServiceServer may be embedded to opt out of forward compatibility for this service.
+// Use of this interface is not recommended, as added methods to CascadeServiceServer will
+// result in compilation errors.
+type UnsafeCascadeServiceServer interface {
+	mustEmbedUnimplementedCascadeServiceServer()
+}
+
+func RegisterCascadeServiceServer(s grpc.ServiceRegistrar, srv CascadeServiceServer) {
+	// If the following call panics, it indicates UnimplementedCascadeServiceServer was
+	// embedded by pointer and is nil. This will cause panics if an
+	// unimplemented method is ever invoked, so we test this at initialization
+	// time to prevent it from happening at runtime later due to I/O.
+ if t, ok := srv.(interface{ testEmbeddedByValue() }); ok { + t.testEmbeddedByValue() + } + s.RegisterService(&CascadeService_ServiceDesc, srv) +} + +func _CascadeService_UploadInputData_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(UploadInputDataRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(CascadeServiceServer).UploadInputData(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: CascadeService_UploadInputData_FullMethodName, + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(CascadeServiceServer).UploadInputData(ctx, req.(*UploadInputDataRequest)) + } + return interceptor(ctx, in, info, handler) +} + +// CascadeService_ServiceDesc is the grpc.ServiceDesc for CascadeService service. +// It's only intended for direct use with grpc.RegisterService, +// and not to be introspected or modified (even as a copy) +var CascadeService_ServiceDesc = grpc.ServiceDesc{ + ServiceName: "cascade.CascadeService", + HandlerType: (*CascadeServiceServer)(nil), + Methods: []grpc.MethodDesc{ + { + MethodName: "UploadInputData", + Handler: _CascadeService_UploadInputData_Handler, + }, + }, + Streams: []grpc.StreamDesc{}, + Metadata: "proto/supernode/action/cascade/service.proto", +} diff --git a/gen/supernode/supernode/cascade_service.pb.go b/gen/supernode/supernode/cascade_service.pb.go new file mode 100644 index 00000000..0b47a6dc --- /dev/null +++ b/gen/supernode/supernode/cascade_service.pb.go @@ -0,0 +1,370 @@ +// Code generated by protoc-gen-go. DO NOT EDIT. +// versions: +// protoc-gen-go v1.36.5 +// protoc v5.29.3 +// source: proto/supernode/supernode/cascade_service.proto + +package supernode + +import ( + protoreflect "google.golang.org/protobuf/reflect/protoreflect" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" + reflect "reflect" + sync "sync" + unsafe "unsafe" +) + +const ( + // Verify that this generated code is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) + // Verify that runtime/protoimpl is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) + +type SessionRequest struct { + state protoimpl.MessageState `protogen:"open.v1"` + NodeID string `protobuf:"bytes,1,opt,name=nodeID,proto3" json:"nodeID,omitempty"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *SessionRequest) Reset() { + *x = SessionRequest{} + mi := &file_proto_supernode_supernode_cascade_service_proto_msgTypes[0] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *SessionRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*SessionRequest) ProtoMessage() {} + +func (x *SessionRequest) ProtoReflect() protoreflect.Message { + mi := &file_proto_supernode_supernode_cascade_service_proto_msgTypes[0] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use SessionRequest.ProtoReflect.Descriptor instead. 
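Before going further into this second generated file, it may help to see how the cascade service above would be implemented and registered. A minimal sketch, not part of this PR; the handler body and listen address are assumptions:

```go
package main

import (
	"context"
	"net"

	"google.golang.org/grpc"

	cascade "github.com/LumeraProtocol/supernode/gen/supernode/action/cascade"
)

// cascadeServer embeds UnimplementedCascadeServiceServer by value, which is
// exactly what the testEmbeddedByValue check in RegisterCascadeServiceServer verifies.
type cascadeServer struct {
	cascade.UnimplementedCascadeServiceServer
}

func (s *cascadeServer) UploadInputData(ctx context.Context, req *cascade.UploadInputDataRequest) (*cascade.UploadInputDataResponse, error) {
	// Placeholder logic: a real handler would validate req.GetDataHash() and persist the upload.
	return &cascade.UploadInputDataResponse{Success: true, Message: "accepted " + req.GetFilename()}, nil
}

func main() {
	lis, err := net.Listen("tcp", ":50051") // port is an assumption
	if err != nil {
		panic(err)
	}
	s := grpc.NewServer()
	cascade.RegisterCascadeServiceServer(s, &cascadeServer{})
	_ = s.Serve(lis)
}
```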
+func (*SessionRequest) Descriptor() ([]byte, []int) { + return file_proto_supernode_supernode_cascade_service_proto_rawDescGZIP(), []int{0} +} + +func (x *SessionRequest) GetNodeID() string { + if x != nil { + return x.NodeID + } + return "" +} + +type SessionReply struct { + state protoimpl.MessageState `protogen:"open.v1"` + SessID string `protobuf:"bytes,1,opt,name=sessID,proto3" json:"sessID,omitempty"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *SessionReply) Reset() { + *x = SessionReply{} + mi := &file_proto_supernode_supernode_cascade_service_proto_msgTypes[1] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *SessionReply) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*SessionReply) ProtoMessage() {} + +func (x *SessionReply) ProtoReflect() protoreflect.Message { + mi := &file_proto_supernode_supernode_cascade_service_proto_msgTypes[1] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use SessionReply.ProtoReflect.Descriptor instead. +func (*SessionReply) Descriptor() ([]byte, []int) { + return file_proto_supernode_supernode_cascade_service_proto_rawDescGZIP(), []int{1} +} + +func (x *SessionReply) GetSessID() string { + if x != nil { + return x.SessID + } + return "" +} + +type SendTicketSignatureRequest struct { + state protoimpl.MessageState `protogen:"open.v1"` + NodeID string `protobuf:"bytes,1,opt,name=nodeID,proto3" json:"nodeID,omitempty"` + Signature []byte `protobuf:"bytes,2,opt,name=signature,proto3" json:"signature,omitempty"` + Data []byte `protobuf:"bytes,3,opt,name=data,proto3" json:"data,omitempty"` + RqFile []byte `protobuf:"bytes,4,opt,name=rqFile,proto3" json:"rqFile,omitempty"` + RqEncodeParams *EncoderParameters `protobuf:"bytes,5,opt,name=rqEncodeParams,proto3" json:"rqEncodeParams,omitempty"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *SendTicketSignatureRequest) Reset() { + *x = SendTicketSignatureRequest{} + mi := &file_proto_supernode_supernode_cascade_service_proto_msgTypes[2] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *SendTicketSignatureRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*SendTicketSignatureRequest) ProtoMessage() {} + +func (x *SendTicketSignatureRequest) ProtoReflect() protoreflect.Message { + mi := &file_proto_supernode_supernode_cascade_service_proto_msgTypes[2] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use SendTicketSignatureRequest.ProtoReflect.Descriptor instead. 
+func (*SendTicketSignatureRequest) Descriptor() ([]byte, []int) { + return file_proto_supernode_supernode_cascade_service_proto_rawDescGZIP(), []int{2} +} + +func (x *SendTicketSignatureRequest) GetNodeID() string { + if x != nil { + return x.NodeID + } + return "" +} + +func (x *SendTicketSignatureRequest) GetSignature() []byte { + if x != nil { + return x.Signature + } + return nil +} + +func (x *SendTicketSignatureRequest) GetData() []byte { + if x != nil { + return x.Data + } + return nil +} + +func (x *SendTicketSignatureRequest) GetRqFile() []byte { + if x != nil { + return x.RqFile + } + return nil +} + +func (x *SendTicketSignatureRequest) GetRqEncodeParams() *EncoderParameters { + if x != nil { + return x.RqEncodeParams + } + return nil +} + +type SendTicketSignatureReply struct { + state protoimpl.MessageState `protogen:"open.v1"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *SendTicketSignatureReply) Reset() { + *x = SendTicketSignatureReply{} + mi := &file_proto_supernode_supernode_cascade_service_proto_msgTypes[3] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *SendTicketSignatureReply) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*SendTicketSignatureReply) ProtoMessage() {} + +func (x *SendTicketSignatureReply) ProtoReflect() protoreflect.Message { + mi := &file_proto_supernode_supernode_cascade_service_proto_msgTypes[3] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use SendTicketSignatureReply.ProtoReflect.Descriptor instead. +func (*SendTicketSignatureReply) Descriptor() ([]byte, []int) { + return file_proto_supernode_supernode_cascade_service_proto_rawDescGZIP(), []int{3} +} + +type EncoderParameters struct { + state protoimpl.MessageState `protogen:"open.v1"` + Oti []byte `protobuf:"bytes,1,opt,name=Oti,proto3" json:"Oti,omitempty"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *EncoderParameters) Reset() { + *x = EncoderParameters{} + mi := &file_proto_supernode_supernode_cascade_service_proto_msgTypes[4] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *EncoderParameters) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*EncoderParameters) ProtoMessage() {} + +func (x *EncoderParameters) ProtoReflect() protoreflect.Message { + mi := &file_proto_supernode_supernode_cascade_service_proto_msgTypes[4] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use EncoderParameters.ProtoReflect.Descriptor instead. 
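One property of these generated getters worth calling out: they chain safely through nil nested messages, so callers never need intermediate nil checks. A small illustrative snippet (values assumed; `supernode` is this generated package, and `fmt` is imported):

```go
req := &supernode.SendTicketSignatureRequest{NodeID: "sn2"} // RqEncodeParams deliberately left nil

// GetRqEncodeParams() returns nil, and GetOti() on a nil *EncoderParameters
// simply returns nil, so the chain below cannot panic.
oti := req.GetRqEncodeParams().GetOti()
fmt.Println(oti == nil) // true
```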
+func (*EncoderParameters) Descriptor() ([]byte, []int) { + return file_proto_supernode_supernode_cascade_service_proto_rawDescGZIP(), []int{4} +} + +func (x *EncoderParameters) GetOti() []byte { + if x != nil { + return x.Oti + } + return nil +} + +var File_proto_supernode_supernode_cascade_service_proto protoreflect.FileDescriptor + +var file_proto_supernode_supernode_cascade_service_proto_rawDesc = string([]byte{ + 0x0a, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2f, 0x73, 0x75, 0x70, 0x65, 0x72, 0x6e, 0x6f, 0x64, + 0x65, 0x2f, 0x73, 0x75, 0x70, 0x65, 0x72, 0x6e, 0x6f, 0x64, 0x65, 0x2f, 0x63, 0x61, 0x73, 0x63, + 0x61, 0x64, 0x65, 0x5f, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, + 0x6f, 0x12, 0x09, 0x73, 0x75, 0x70, 0x65, 0x72, 0x6e, 0x6f, 0x64, 0x65, 0x22, 0x28, 0x0a, 0x0e, + 0x53, 0x65, 0x73, 0x73, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x16, + 0x0a, 0x06, 0x6e, 0x6f, 0x64, 0x65, 0x49, 0x44, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x06, + 0x6e, 0x6f, 0x64, 0x65, 0x49, 0x44, 0x22, 0x26, 0x0a, 0x0c, 0x53, 0x65, 0x73, 0x73, 0x69, 0x6f, + 0x6e, 0x52, 0x65, 0x70, 0x6c, 0x79, 0x12, 0x16, 0x0a, 0x06, 0x73, 0x65, 0x73, 0x73, 0x49, 0x44, + 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x06, 0x73, 0x65, 0x73, 0x73, 0x49, 0x44, 0x22, 0xc4, + 0x01, 0x0a, 0x1a, 0x53, 0x65, 0x6e, 0x64, 0x54, 0x69, 0x63, 0x6b, 0x65, 0x74, 0x53, 0x69, 0x67, + 0x6e, 0x61, 0x74, 0x75, 0x72, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x16, 0x0a, + 0x06, 0x6e, 0x6f, 0x64, 0x65, 0x49, 0x44, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x06, 0x6e, + 0x6f, 0x64, 0x65, 0x49, 0x44, 0x12, 0x1c, 0x0a, 0x09, 0x73, 0x69, 0x67, 0x6e, 0x61, 0x74, 0x75, + 0x72, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x09, 0x73, 0x69, 0x67, 0x6e, 0x61, 0x74, + 0x75, 0x72, 0x65, 0x12, 0x12, 0x0a, 0x04, 0x64, 0x61, 0x74, 0x61, 0x18, 0x03, 0x20, 0x01, 0x28, + 0x0c, 0x52, 0x04, 0x64, 0x61, 0x74, 0x61, 0x12, 0x16, 0x0a, 0x06, 0x72, 0x71, 0x46, 0x69, 0x6c, + 0x65, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x06, 0x72, 0x71, 0x46, 0x69, 0x6c, 0x65, 0x12, + 0x44, 0x0a, 0x0e, 0x72, 0x71, 0x45, 0x6e, 0x63, 0x6f, 0x64, 0x65, 0x50, 0x61, 0x72, 0x61, 0x6d, + 0x73, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1c, 0x2e, 0x73, 0x75, 0x70, 0x65, 0x72, 0x6e, + 0x6f, 0x64, 0x65, 0x2e, 0x45, 0x6e, 0x63, 0x6f, 0x64, 0x65, 0x72, 0x50, 0x61, 0x72, 0x61, 0x6d, + 0x65, 0x74, 0x65, 0x72, 0x73, 0x52, 0x0e, 0x72, 0x71, 0x45, 0x6e, 0x63, 0x6f, 0x64, 0x65, 0x50, + 0x61, 0x72, 0x61, 0x6d, 0x73, 0x22, 0x1a, 0x0a, 0x18, 0x53, 0x65, 0x6e, 0x64, 0x54, 0x69, 0x63, + 0x6b, 0x65, 0x74, 0x53, 0x69, 0x67, 0x6e, 0x61, 0x74, 0x75, 0x72, 0x65, 0x52, 0x65, 0x70, 0x6c, + 0x79, 0x22, 0x25, 0x0a, 0x11, 0x45, 0x6e, 0x63, 0x6f, 0x64, 0x65, 0x72, 0x50, 0x61, 0x72, 0x61, + 0x6d, 0x65, 0x74, 0x65, 0x72, 0x73, 0x12, 0x10, 0x0a, 0x03, 0x4f, 0x74, 0x69, 0x18, 0x01, 0x20, + 0x01, 0x28, 0x0c, 0x52, 0x03, 0x4f, 0x74, 0x69, 0x32, 0xbd, 0x01, 0x0a, 0x0e, 0x43, 0x61, 0x73, + 0x63, 0x61, 0x64, 0x65, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x12, 0x41, 0x0a, 0x07, 0x53, + 0x65, 0x73, 0x73, 0x69, 0x6f, 0x6e, 0x12, 0x19, 0x2e, 0x73, 0x75, 0x70, 0x65, 0x72, 0x6e, 0x6f, + 0x64, 0x65, 0x2e, 0x53, 0x65, 0x73, 0x73, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, + 0x74, 0x1a, 0x17, 0x2e, 0x73, 0x75, 0x70, 0x65, 0x72, 0x6e, 0x6f, 0x64, 0x65, 0x2e, 0x53, 0x65, + 0x73, 0x73, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x70, 0x6c, 0x79, 0x28, 0x01, 0x30, 0x01, 0x12, 0x68, + 0x0a, 0x1a, 0x53, 0x65, 0x6e, 0x64, 0x43, 0x61, 0x73, 0x63, 0x61, 0x64, 0x65, 0x54, 0x69, 
0x63, + 0x6b, 0x65, 0x74, 0x53, 0x69, 0x67, 0x6e, 0x61, 0x74, 0x75, 0x72, 0x65, 0x12, 0x25, 0x2e, 0x73, + 0x75, 0x70, 0x65, 0x72, 0x6e, 0x6f, 0x64, 0x65, 0x2e, 0x53, 0x65, 0x6e, 0x64, 0x54, 0x69, 0x63, + 0x6b, 0x65, 0x74, 0x53, 0x69, 0x67, 0x6e, 0x61, 0x74, 0x75, 0x72, 0x65, 0x52, 0x65, 0x71, 0x75, + 0x65, 0x73, 0x74, 0x1a, 0x23, 0x2e, 0x73, 0x75, 0x70, 0x65, 0x72, 0x6e, 0x6f, 0x64, 0x65, 0x2e, + 0x53, 0x65, 0x6e, 0x64, 0x54, 0x69, 0x63, 0x6b, 0x65, 0x74, 0x53, 0x69, 0x67, 0x6e, 0x61, 0x74, + 0x75, 0x72, 0x65, 0x52, 0x65, 0x70, 0x6c, 0x79, 0x42, 0x3d, 0x5a, 0x3b, 0x67, 0x69, 0x74, 0x68, + 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x4c, 0x75, 0x6d, 0x65, 0x72, 0x61, 0x50, 0x72, 0x6f, + 0x74, 0x6f, 0x63, 0x6f, 0x6c, 0x2f, 0x73, 0x75, 0x70, 0x65, 0x72, 0x6e, 0x6f, 0x64, 0x65, 0x2f, + 0x67, 0x65, 0x6e, 0x2f, 0x73, 0x75, 0x70, 0x65, 0x72, 0x6e, 0x6f, 0x64, 0x65, 0x2f, 0x73, 0x75, + 0x70, 0x65, 0x72, 0x6e, 0x6f, 0x64, 0x65, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, +}) + +var ( + file_proto_supernode_supernode_cascade_service_proto_rawDescOnce sync.Once + file_proto_supernode_supernode_cascade_service_proto_rawDescData []byte +) + +func file_proto_supernode_supernode_cascade_service_proto_rawDescGZIP() []byte { + file_proto_supernode_supernode_cascade_service_proto_rawDescOnce.Do(func() { + file_proto_supernode_supernode_cascade_service_proto_rawDescData = protoimpl.X.CompressGZIP(unsafe.Slice(unsafe.StringData(file_proto_supernode_supernode_cascade_service_proto_rawDesc), len(file_proto_supernode_supernode_cascade_service_proto_rawDesc))) + }) + return file_proto_supernode_supernode_cascade_service_proto_rawDescData +} + +var file_proto_supernode_supernode_cascade_service_proto_msgTypes = make([]protoimpl.MessageInfo, 5) +var file_proto_supernode_supernode_cascade_service_proto_goTypes = []any{ + (*SessionRequest)(nil), // 0: supernode.SessionRequest + (*SessionReply)(nil), // 1: supernode.SessionReply + (*SendTicketSignatureRequest)(nil), // 2: supernode.SendTicketSignatureRequest + (*SendTicketSignatureReply)(nil), // 3: supernode.SendTicketSignatureReply + (*EncoderParameters)(nil), // 4: supernode.EncoderParameters +} +var file_proto_supernode_supernode_cascade_service_proto_depIdxs = []int32{ + 4, // 0: supernode.SendTicketSignatureRequest.rqEncodeParams:type_name -> supernode.EncoderParameters + 0, // 1: supernode.CascadeService.Session:input_type -> supernode.SessionRequest + 2, // 2: supernode.CascadeService.SendCascadeTicketSignature:input_type -> supernode.SendTicketSignatureRequest + 1, // 3: supernode.CascadeService.Session:output_type -> supernode.SessionReply + 3, // 4: supernode.CascadeService.SendCascadeTicketSignature:output_type -> supernode.SendTicketSignatureReply + 3, // [3:5] is the sub-list for method output_type + 1, // [1:3] is the sub-list for method input_type + 1, // [1:1] is the sub-list for extension type_name + 1, // [1:1] is the sub-list for extension extendee + 0, // [0:1] is the sub-list for field type_name +} + +func init() { file_proto_supernode_supernode_cascade_service_proto_init() } +func file_proto_supernode_supernode_cascade_service_proto_init() { + if File_proto_supernode_supernode_cascade_service_proto != nil { + return + } + type x struct{} + out := protoimpl.TypeBuilder{ + File: protoimpl.DescBuilder{ + GoPackagePath: reflect.TypeOf(x{}).PkgPath(), + RawDescriptor: unsafe.Slice(unsafe.StringData(file_proto_supernode_supernode_cascade_service_proto_rawDesc), len(file_proto_supernode_supernode_cascade_service_proto_rawDesc)), + NumEnums: 0, + 
NumMessages:   5,
+			NumExtensions: 0,
+			NumServices:   1,
+		},
+		GoTypes:           file_proto_supernode_supernode_cascade_service_proto_goTypes,
+		DependencyIndexes: file_proto_supernode_supernode_cascade_service_proto_depIdxs,
+		MessageInfos:      file_proto_supernode_supernode_cascade_service_proto_msgTypes,
+	}.Build()
+	File_proto_supernode_supernode_cascade_service_proto = out.File
+	file_proto_supernode_supernode_cascade_service_proto_goTypes = nil
+	file_proto_supernode_supernode_cascade_service_proto_depIdxs = nil
+}
diff --git a/gen/supernode/supernode/cascade_service_grpc.pb.go b/gen/supernode/supernode/cascade_service_grpc.pb.go
new file mode 100644
index 00000000..453a3f89
--- /dev/null
+++ b/gen/supernode/supernode/cascade_service_grpc.pb.go
@@ -0,0 +1,160 @@
+// Code generated by protoc-gen-go-grpc. DO NOT EDIT.
+// versions:
+// - protoc-gen-go-grpc v1.5.1
+// - protoc             v5.29.3
+// source: proto/supernode/supernode/cascade_service.proto
+
+package supernode
+
+import (
+	context "context"
+	grpc "google.golang.org/grpc"
+	codes "google.golang.org/grpc/codes"
+	status "google.golang.org/grpc/status"
+)
+
+// This is a compile-time assertion to ensure that this generated file
+// is compatible with the grpc package it is being compiled against.
+// Requires gRPC-Go v1.64.0 or later.
+const _ = grpc.SupportPackageIsVersion9
+
+const (
+	CascadeService_Session_FullMethodName                    = "/supernode.CascadeService/Session"
+	CascadeService_SendCascadeTicketSignature_FullMethodName = "/supernode.CascadeService/SendCascadeTicketSignature"
+)
+
+// CascadeServiceClient is the client API for CascadeService service.
+//
+// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://pkg.go.dev/google.golang.org/grpc/?tab=doc#ClientConn.NewStream.
+type CascadeServiceClient interface {
+	// Session informs primary supernode about its `nodeID` and `sessID` it wants to connect to.
+	// The stream is used by the parties to inform each other about the cancellation of the task.
+	Session(ctx context.Context, opts ...grpc.CallOption) (grpc.BidiStreamingClient[SessionRequest, SessionReply], error)
+	// SendCascadeTicketSignature sends the signature from supernodes mn2/mn3 for the given reg NFT session id to the primary supernode
+	SendCascadeTicketSignature(ctx context.Context, in *SendTicketSignatureRequest, opts ...grpc.CallOption) (*SendTicketSignatureReply, error)
+}
+
+type cascadeServiceClient struct {
+	cc grpc.ClientConnInterface
+}
+
+func NewCascadeServiceClient(cc grpc.ClientConnInterface) CascadeServiceClient {
+	return &cascadeServiceClient{cc}
+}
+
+func (c *cascadeServiceClient) Session(ctx context.Context, opts ...grpc.CallOption) (grpc.BidiStreamingClient[SessionRequest, SessionReply], error) {
+	cOpts := append([]grpc.CallOption{grpc.StaticMethod()}, opts...)
+	stream, err := c.cc.NewStream(ctx, &CascadeService_ServiceDesc.Streams[0], CascadeService_Session_FullMethodName, cOpts...)
+	if err != nil {
+		return nil, err
+	}
+	x := &grpc.GenericClientStream[SessionRequest, SessionReply]{ClientStream: stream}
+	return x, nil
+}
+
+// This type alias is provided for backwards compatibility with existing code that references the prior non-generic stream type by name.
+type CascadeService_SessionClient = grpc.BidiStreamingClient[SessionRequest, SessionReply]
+
+func (c *cascadeServiceClient) SendCascadeTicketSignature(ctx context.Context, in *SendTicketSignatureRequest, opts ...grpc.CallOption) (*SendTicketSignatureReply, error) {
+	cOpts := append([]grpc.CallOption{grpc.StaticMethod()}, opts...)
+	out := new(SendTicketSignatureReply)
+	err := c.cc.Invoke(ctx, CascadeService_SendCascadeTicketSignature_FullMethodName, in, out, cOpts...)
+	if err != nil {
+		return nil, err
+	}
+	return out, nil
+}
+
+// CascadeServiceServer is the server API for CascadeService service.
+// All implementations must embed UnimplementedCascadeServiceServer
+// for forward compatibility.
+type CascadeServiceServer interface {
+	// Session informs primary supernode about its `nodeID` and `sessID` it wants to connect to.
+	// The stream is used by the parties to inform each other about the cancellation of the task.
+	Session(grpc.BidiStreamingServer[SessionRequest, SessionReply]) error
+	// SendCascadeTicketSignature sends the signature from supernodes mn2/mn3 for the given reg NFT session id to the primary supernode
+	SendCascadeTicketSignature(context.Context, *SendTicketSignatureRequest) (*SendTicketSignatureReply, error)
+	mustEmbedUnimplementedCascadeServiceServer()
+}
+
+// UnimplementedCascadeServiceServer must be embedded to have
+// forward compatible implementations.
+//
+// NOTE: this should be embedded by value instead of pointer to avoid a nil
+// pointer dereference when methods are called.
+type UnimplementedCascadeServiceServer struct{}
+
+func (UnimplementedCascadeServiceServer) Session(grpc.BidiStreamingServer[SessionRequest, SessionReply]) error {
+	return status.Errorf(codes.Unimplemented, "method Session not implemented")
+}
+func (UnimplementedCascadeServiceServer) SendCascadeTicketSignature(context.Context, *SendTicketSignatureRequest) (*SendTicketSignatureReply, error) {
+	return nil, status.Errorf(codes.Unimplemented, "method SendCascadeTicketSignature not implemented")
+}
+func (UnimplementedCascadeServiceServer) mustEmbedUnimplementedCascadeServiceServer() {}
+func (UnimplementedCascadeServiceServer) testEmbeddedByValue()                        {}
+
+// UnsafeCascadeServiceServer may be embedded to opt out of forward compatibility for this service.
+// Use of this interface is not recommended, as added methods to CascadeServiceServer will
+// result in compilation errors.
+type UnsafeCascadeServiceServer interface {
+	mustEmbedUnimplementedCascadeServiceServer()
+}
+
+func RegisterCascadeServiceServer(s grpc.ServiceRegistrar, srv CascadeServiceServer) {
+	// If the following call panics, it indicates UnimplementedCascadeServiceServer was
+	// embedded by pointer and is nil. This will cause panics if an
+	// unimplemented method is ever invoked, so we test this at initialization
+	// time to prevent it from happening at runtime later due to I/O.
+	if t, ok := srv.(interface{ testEmbeddedByValue() }); ok {
+		t.testEmbeddedByValue()
+	}
+	s.RegisterService(&CascadeService_ServiceDesc, srv)
+}
+
+func _CascadeService_Session_Handler(srv interface{}, stream grpc.ServerStream) error {
+	return srv.(CascadeServiceServer).Session(&grpc.GenericServerStream[SessionRequest, SessionReply]{ServerStream: stream})
+}
+
+// This type alias is provided for backwards compatibility with existing code that references the prior non-generic stream type by name.
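Since Session is the only streaming RPC in this PR, a hedged sketch of a server-side implementation may help. The echo-style reply is illustrative only, and the `server` type is assumed to embed UnimplementedCascadeServiceServer as described above:

```go
import (
	"errors"
	"io"

	supernode "github.com/LumeraProtocol/supernode/gen/supernode/supernode"
	"google.golang.org/grpc"
)

func (s *server) Session(stream grpc.BidiStreamingServer[supernode.SessionRequest, supernode.SessionReply]) error {
	for {
		req, err := stream.Recv()
		if errors.Is(err, io.EOF) {
			return nil // peer closed its send side
		}
		if err != nil {
			return err
		}
		// Illustrative handshake: acknowledge the peer's nodeID with a session ID.
		if err := stream.Send(&supernode.SessionReply{SessID: "sess-" + req.GetNodeID()}); err != nil {
			return err
		}
	}
}
```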
+type CascadeService_SessionServer = grpc.BidiStreamingServer[SessionRequest, SessionReply] + +func _CascadeService_SendCascadeTicketSignature_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(SendTicketSignatureRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(CascadeServiceServer).SendCascadeTicketSignature(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: CascadeService_SendCascadeTicketSignature_FullMethodName, + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(CascadeServiceServer).SendCascadeTicketSignature(ctx, req.(*SendTicketSignatureRequest)) + } + return interceptor(ctx, in, info, handler) +} + +// CascadeService_ServiceDesc is the grpc.ServiceDesc for CascadeService service. +// It's only intended for direct use with grpc.RegisterService, +// and not to be introspected or modified (even as a copy) +var CascadeService_ServiceDesc = grpc.ServiceDesc{ + ServiceName: "supernode.CascadeService", + HandlerType: (*CascadeServiceServer)(nil), + Methods: []grpc.MethodDesc{ + { + MethodName: "SendCascadeTicketSignature", + Handler: _CascadeService_SendCascadeTicketSignature_Handler, + }, + }, + Streams: []grpc.StreamDesc{ + { + StreamName: "Session", + Handler: _CascadeService_Session_Handler, + ServerStreams: true, + ClientStreams: true, + }, + }, + Metadata: "proto/supernode/supernode/cascade_service.proto", +} diff --git a/go.mod b/go.mod index f4047d77..91169655 100644 --- a/go.mod +++ b/go.mod @@ -6,21 +6,25 @@ require ( cosmossdk.io/api v0.7.6 github.com/LumeraProtocol/lumera v0.4.2 github.com/btcsuite/btcutil v1.0.3-0.20201208143702-a53e38424cce + github.com/cenkalti/backoff v2.2.1+incompatible github.com/cenkalti/backoff/v4 v4.3.0 github.com/cosmos/btcutil v1.0.5 github.com/cosmos/cosmos-sdk v0.50.12 github.com/cosmos/go-bip39 v1.0.0 github.com/cosmos/gogoproto v1.7.0 + github.com/disintegration/imaging v1.6.2 github.com/go-errors/errors v1.5.1 github.com/golang/mock v1.6.0 github.com/google/uuid v1.6.0 github.com/jmoiron/sqlx v1.4.0 github.com/json-iterator/go v1.1.12 github.com/klauspost/compress v1.17.11 + github.com/kolesa-team/go-webp v1.0.4 github.com/mattn/go-sqlite3 v1.14.24 github.com/patrickmn/go-cache v2.1.0+incompatible github.com/pkg/errors v0.9.1 github.com/sirupsen/logrus v1.9.3 + github.com/spf13/viper v1.19.0 github.com/stretchr/testify v1.10.0 github.com/x-cray/logrus-prefixed-formatter v0.5.2 go.uber.org/ratelimit v0.3.1 @@ -145,7 +149,6 @@ require ( github.com/spf13/cast v1.7.1 // indirect github.com/spf13/cobra v1.8.1 // indirect github.com/spf13/pflag v1.0.5 // indirect - github.com/spf13/viper v1.19.0 // indirect github.com/stretchr/objx v0.5.2 // indirect github.com/subosito/gotenv v1.6.0 // indirect github.com/syndtr/goleveldb v1.0.1-0.20220721030215-126854af5e6d // indirect @@ -156,6 +159,7 @@ require ( go.etcd.io/bbolt v1.3.10 // indirect go.uber.org/multierr v1.11.0 // indirect golang.org/x/exp v0.0.0-20240613232115-7f521ea00fb8 // indirect + golang.org/x/image v0.0.0-20210628002857-a66eb6448b8d // indirect golang.org/x/net v0.35.0 // indirect golang.org/x/term v0.29.0 // indirect golang.org/x/text v0.22.0 // indirect diff --git a/go.sum b/go.sum index 7b8e02be..809c3c42 100644 --- a/go.sum +++ b/go.sum @@ -123,6 +123,7 @@ github.com/btcsuite/winsvc v1.0.0/go.mod h1:jsenWakMcC0zFBFurPLEAyrnc/teJEM1O46f 
github.com/bufbuild/protocompile v0.14.0 h1:z3DW4IvXE5G/uTOnSQn+qwQQxvhckkTWLS/0No/o7KU= github.com/bufbuild/protocompile v0.14.0/go.mod h1:N6J1NYzkspJo3ZwyL4Xjvli86XOj1xq4qAasUFxGups= github.com/casbin/casbin/v2 v2.1.2/go.mod h1:YcPU1XXisHhLzuxH9coDNf2FbKpjGlbCg3n9yuLkIJQ= +github.com/cenkalti/backoff v2.2.1+incompatible h1:tNowT99t7UNflLxfYYSlKYsBpXdEet03Pg2g16Swow4= github.com/cenkalti/backoff v2.2.1+incompatible/go.mod h1:90ReRw6GdpyfrHakVjL/QHaoyV4aDUVVkXQJJJ3NXXM= github.com/cenkalti/backoff/v4 v4.1.1/go.mod h1:scbssz8iZGpm3xbr14ovlUdkxfGXNInqkPWOWmG2CLw= github.com/cenkalti/backoff/v4 v4.3.0 h1:MyRJ/UdXutAwSAT+s3wNd7MfTIcy71VQueUuFK343L8= @@ -229,6 +230,8 @@ github.com/dgrijalva/jwt-go v3.2.0+incompatible/go.mod h1:E3ru+11k8xSBh+hMPgOLZm github.com/dgryski/go-farm v0.0.0-20190423205320-6a90982ecee2/go.mod h1:SqUrOPUnsFjfmXRMNPybcSiG0BgUW2AuFH8PAnS2iTw= github.com/dgryski/go-farm v0.0.0-20200201041132-a6ae2369ad13 h1:fAjc9m62+UWV/WAFKLNi6ZS0675eEUC9y3AlwSbQu1Y= github.com/dgryski/go-farm v0.0.0-20200201041132-a6ae2369ad13/go.mod h1:SqUrOPUnsFjfmXRMNPybcSiG0BgUW2AuFH8PAnS2iTw= +github.com/disintegration/imaging v1.6.2 h1:w1LecBlG2Lnp8B3jk5zSuNqd7b4DXhcjwek1ei82L+c= +github.com/disintegration/imaging v1.6.2/go.mod h1:44/5580QXChDfwIclfc/PCwrr44amcmDAg8hxG0Ewe4= github.com/distribution/reference v0.6.0 h1:0IXCQ5g4/QMHHkarYzh5l+u8T3t73zM5QvfrDyIgxBk= github.com/distribution/reference v0.6.0/go.mod h1:BbU0aIcezP1/5jX/8MP0YiH4SdvB5Y4f/wlDRiLyi3E= github.com/dustin/go-humanize v0.0.0-20171111073723-bb3d318650d4/go.mod h1:HtrtbFcZ19U5GC7JDqmcUSB87Iq5E25KnS6fMYU6eOk= @@ -515,6 +518,8 @@ github.com/klauspost/compress v1.11.7/go.mod h1:aoV0uJVorq1K+umq18yTdKaF57EivdYs github.com/klauspost/compress v1.12.3/go.mod h1:8dP1Hq4DHOhN9w426knH3Rhby4rFm6D8eO+e+Dq5Gzg= github.com/klauspost/compress v1.17.11 h1:In6xLpyWOi1+C7tXUUWv2ot1QvBjxevKAaI6IXrJmUc= github.com/klauspost/compress v1.17.11/go.mod h1:pMDklpSncoRMuLFrf1W9Ss9KT+0rH90U12bZKk7uwG0= +github.com/kolesa-team/go-webp v1.0.4 h1:wQvU4PLG/X7RS0vAeyhiivhLRoxfLVRlDq4I3frdxIQ= +github.com/kolesa-team/go-webp v1.0.4/go.mod h1:oMvdivD6K+Q5qIIkVC2w4k2ZUnI1H+MyP7inwgWq9aA= github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= github.com/konsorten/go-windows-terminal-sequences v1.0.3/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= github.com/kr/logfmt v0.0.0-20140226030751-b84e30acd515/go.mod h1:+0opPa2QZZtGFBFZlji/RkVcI2GknAs/DXo4wKdlNEc= @@ -872,6 +877,9 @@ golang.org/x/exp v0.0.0-20240613232115-7f521ea00fb8 h1:yixxcjnhBmY0nkL253HFVIm0J golang.org/x/exp v0.0.0-20240613232115-7f521ea00fb8/go.mod h1:jj3sYF3dwk5D+ghuXyeI3r5MFf+NT2An6/9dOA95KSI= golang.org/x/image v0.0.0-20190227222117-0694c2d4d067/go.mod h1:kZ7UVZpmo3dzQBMxlp+ypCbDeSB+sBbTgSJuh5dn5js= golang.org/x/image v0.0.0-20190802002840-cff245a6509b/go.mod h1:FeLwcggjj3mMvU+oOTbSwawSJRM1uh48EjtB4UJZlP0= +golang.org/x/image v0.0.0-20191009234506-e7c1f5e7dbb8/go.mod h1:FeLwcggjj3mMvU+oOTbSwawSJRM1uh48EjtB4UJZlP0= +golang.org/x/image v0.0.0-20210628002857-a66eb6448b8d h1:RNPAfi2nHY7C2srAV8A49jpsYr0ADedCk1wq6fTMTvs= +golang.org/x/image v0.0.0-20210628002857-a66eb6448b8d/go.mod h1:023OzeP/+EPmXeapQh35lcL3II3LrY8Ic+EFFKVhULM= golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= golang.org/x/lint v0.0.0-20190227174305-5b3e6a55c961/go.mod h1:wehouNa3lNwaWXcvxsM5YxQ5yQlVC4a0KAMCusXpPoU= golang.org/x/lint v0.0.0-20190301231843-5614ed5bae6f/go.mod 
h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE=
diff --git a/pkg/common/blocktracker/block_tracker.go b/pkg/common/blocktracker/block_tracker.go
new file mode 100644
index 00000000..13a5c5fd
--- /dev/null
+++ b/pkg/common/blocktracker/block_tracker.go
@@ -0,0 +1,121 @@
+package blocktracker
+
+import (
+	"context"
+	"sync"
+	"time"
+
+	"github.com/LumeraProtocol/supernode/pkg/errors"
+)
+
+const (
+	defaultRetries                     = 3
+	defaultDelayDurationBetweenRetries = 5 * time.Second
+	defaultRPCConnectTimeout           = 15 * time.Second
+	// Refresh interval when the last update succeeded
+	defaultSuccessUpdateDuration = 10 * time.Second
+	// Refresh interval when the last update failed - prevents calling Lumera too often
+	defaultFailedUpdateDuration = 5 * time.Second
+	defaultNextBlockTimeout     = 30 * time.Minute
+)
+
+// LumeraClient defines the interface functions BlockCntTracker expects from Lumera
+type LumeraClient interface {
+	// GetBlockCount returns the current block height of the blockchain
+	GetBlockCount(ctx context.Context) (int32, error)
+}
+
+// BlockCntTracker is a block tracker that caches the current block height
+type BlockCntTracker struct {
+	mtx                 sync.Mutex
+	LumeraClient        LumeraClient
+	curBlockCnt         int32
+	lastSuccess         time.Time
+	lastRetried         time.Time
+	lastErr             error
+	delayBetweenRetries time.Duration
+	retries             int
+}
+
+// New returns an instance of BlockCntTracker
+func New(LumeraClient LumeraClient) *BlockCntTracker {
+	return &BlockCntTracker{
+		LumeraClient:        LumeraClient,
+		curBlockCnt:         0,
+		delayBetweenRetries: defaultDelayDurationBetweenRetries,
+		retries:             defaultRetries,
+	}
+}
+
+func (tracker *BlockCntTracker) refreshBlockCount(retries int) {
+	tracker.lastRetried = time.Now().UTC()
+	for i := 0; i < retries; i++ {
+		ctx, cancel := context.WithTimeout(context.Background(), defaultRPCConnectTimeout)
+		blockCnt, err := tracker.LumeraClient.GetBlockCount(ctx)
+		if err == nil {
+			tracker.curBlockCnt = blockCnt
+			tracker.lastSuccess = time.Now().UTC()
+			cancel()
+			tracker.lastErr = nil
+			return
+		}
+		cancel()
+
+		tracker.lastErr = err
+		// delay between retries
+		time.Sleep(tracker.delayBetweenRetries)
+	}
+}
+
+// GetBlockCount returns the current block count.
+// It serves the cached value if the last successful refresh is newer than defaultSuccessUpdateDuration,
+// otherwise it refreshes from the Lumera daemon to fetch the latest height.
+func (tracker *BlockCntTracker) GetBlockCount() (int32, error) {
+	tracker.mtx.Lock()
+	defer tracker.mtx.Unlock()
+
+	shouldRefresh := false
+
+	if tracker.lastSuccess.After(tracker.lastRetried) {
+		if time.Now().UTC().After(tracker.lastSuccess.Add(defaultSuccessUpdateDuration)) {
+			shouldRefresh = true
+		}
+	} else {
+		// prevent refreshing too often after a failure
+		if time.Now().UTC().After(tracker.lastRetried.Add(defaultFailedUpdateDuration)) {
+			shouldRefresh = true
+		}
+	}
+
+	if shouldRefresh {
+		tracker.refreshBlockCount(tracker.retries)
+	}
+
+	if tracker.curBlockCnt == 0 {
+		return 0, errors.Errorf("failed to get blockcount: %w", tracker.lastErr)
+	}
+
+	return tracker.curBlockCnt, nil
+}
+
+// WaitTillNextBlock waits until the block height is greater than blockCnt
+func (tracker *BlockCntTracker) WaitTillNextBlock(ctx context.Context, blockCnt int32) error {
+	// Create the overall timeout once; recreating it inside the loop would reset
+	// it on every poll tick, so it would never fire.
+	timeout := time.After(defaultNextBlockTimeout)
+	for {
+		select {
+		case <-ctx.Done():
+			return errors.Errorf("context done: %w", ctx.Err())
+		case <-timeout:
+			return errors.Errorf("timeout waiting for next block")
+		case <-time.After(defaultSuccessUpdateDuration):
+			curBlockCnt, err := 
tracker.GetBlockCount() + if err != nil { + return errors.Errorf("failed to get blockcount: %w", err) + } + + if curBlockCnt > blockCnt { + return nil + } + } + } +} diff --git a/pkg/common/blocktracker/block_tracker_test.go b/pkg/common/blocktracker/block_tracker_test.go new file mode 100644 index 00000000..b070a4b7 --- /dev/null +++ b/pkg/common/blocktracker/block_tracker_test.go @@ -0,0 +1,97 @@ +package blocktracker + +import ( + "context" + "errors" + "strings" + "testing" + "time" + + "github.com/stretchr/testify/assert" +) + +type fakePastelClient struct { + retBlockCnt int32 + retErr error +} + +func (fake *fakePastelClient) GetBlockCount(_ context.Context) (int32, error) { + return fake.retBlockCnt, fake.retErr +} + +func TestGetCountFirstTime(t *testing.T) { + tests := []struct { + name string + pastelClient *fakePastelClient + expectErr bool + }{ + { + name: "success", + pastelClient: &fakePastelClient{ + retBlockCnt: 10, + retErr: nil, + }, + expectErr: false, + }, + { + name: "fail", + pastelClient: &fakePastelClient{ + retBlockCnt: 0, + retErr: errors.New("error"), + }, + expectErr: true, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + tracker := New(tt.pastelClient) + tracker.retries = 1 + blkCnt, err := tracker.GetBlockCount() + assert.Equal(t, tt.pastelClient.retBlockCnt, blkCnt) + if tt.expectErr { + assert.True(t, strings.Contains(err.Error(), tt.pastelClient.retErr.Error())) + } else { + assert.Nil(t, err) + } + }) + } +} + +func TestGetBlockCountNoRefresh(t *testing.T) { + pastelClient := &fakePastelClient{ + retBlockCnt: 10, + retErr: errors.New("error"), + } + + expectedBlk := int32(1) + tracker := New(pastelClient) + tracker.retries = 1 + tracker.curBlockCnt = expectedBlk + tracker.lastRetried = time.Now().UTC() + tracker.lastSuccess = time.Now().UTC() + + blkCnt, err := tracker.GetBlockCount() + assert.Equal(t, expectedBlk, blkCnt) + + assert.Nil(t, err) +} + +func TestGetBlockCountRefresh(t *testing.T) { + expectedBlk := int32(10) + pastelClient := &fakePastelClient{ + retBlockCnt: expectedBlk, + retErr: nil, + } + + tracker := New(pastelClient) + tracker.retries = 1 + tracker.curBlockCnt = 1 + tracker.lastRetried = time.Now().UTC().Add(-defaultSuccessUpdateDuration) + tracker.lastSuccess = time.Now().UTC().Add(-defaultSuccessUpdateDuration) + + blkCnt, err := tracker.GetBlockCount() + assert.Equal(t, expectedBlk, blkCnt) + + assert.Nil(t, err) +} diff --git a/pkg/common/task/action.go b/pkg/common/task/action.go new file mode 100644 index 00000000..227ebe35 --- /dev/null +++ b/pkg/common/task/action.go @@ -0,0 +1,20 @@ +package task + +import "context" + +// ActionFn represents a function that is run inside a goroutine. +type ActionFn func(ctx context.Context) error + +// Action represents the action of the task. +type Action struct { + fn ActionFn + doneCh chan struct{} +} + +// NewAction returns a new Action instance. 
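To make the Action contract concrete before its constructor appears below: the runner (RunAction, later in this diff) closes doneCh once fn returns, and waiters block on that channel. An in-package sketch of that handshake, under the assumption it runs inside package task:

```go
act := NewAction(func(ctx context.Context) error {
	// work goes here
	return nil
})

// The runner side: execute fn, then signal completion by closing doneCh.
go func() {
	defer close(act.doneCh)
	_ = act.fn(context.Background())
}()

<-act.doneCh // the waiter side: blocks until the action has finished
```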
+func NewAction(fn ActionFn) *Action {
+	return &Action{
+		fn:     fn,
+		doneCh: make(chan struct{}),
+	}
+}
diff --git a/pkg/common/task/state/state.go b/pkg/common/task/state/state.go
new file mode 100644
index 00000000..e1eed5fd
--- /dev/null
+++ b/pkg/common/task/state/state.go
@@ -0,0 +1,174 @@
+//go:generate mockery --name=State
+
+package state
+
+import (
+	"context"
+	"sync"
+	"time"
+
+	"github.com/LumeraProtocol/supernode/pkg/errors"
+	"github.com/LumeraProtocol/supernode/pkg/log"
+	"github.com/LumeraProtocol/supernode/pkg/storage/queries"
+	"github.com/LumeraProtocol/supernode/pkg/types"
+)
+
+// State represents a state of the task.
+type State interface {
+	// Status returns the current status.
+	Status() *Status
+
+	// SetStatusNotifyFunc sets a function to be called after the state is updated.
+	SetStatusNotifyFunc(fn func(status *Status))
+
+	// RequiredStatus returns an error if the current status doesn't match the given one.
+	RequiredStatus(subStatus SubStatus) error
+
+	// StatusHistory returns all history from the very beginning.
+	StatusHistory() []*Status
+
+	// UpdateStatus updates the status of the state by creating a new status with the given `status`.
+	UpdateStatus(subStatus SubStatus)
+
+	// SubscribeStatus returns a new subscription of the state.
+	SubscribeStatus() func() <-chan *Status
+
+	// SetStateLog sets the wallet node task status log on the state's status log
+	SetStateLog(statusLog types.Fields)
+
+	// InitialiseHistoryDB sets the connection to historyDB
+	InitialiseHistoryDB(store queries.LocalStoreInterface)
+}
+
+type state struct {
+	status  *Status
+	history []*Status
+
+	notifyFn func(status *Status)
+	sync.RWMutex
+	subsCh         []chan *Status
+	taskID         string
+	statusLog      types.Fields
+	historyDBStore queries.LocalStoreInterface
+}
+
+// Status implements State.Status()
+func (state *state) Status() *Status {
+	return state.status
+}
+
+// SetStatusNotifyFunc implements State.SetStatusNotifyFunc()
+func (state *state) SetStatusNotifyFunc(fn func(status *Status)) {
+	state.notifyFn = fn
+}
+
+// RequiredStatus implements State.RequiredStatus()
+func (state *state) RequiredStatus(subStatus SubStatus) error {
+	if state.status.Is(subStatus) {
+		return nil
+	}
+	return errors.Errorf("required status %q, current %q", subStatus, state.status)
+}
+
+// StatusHistory implements State.StatusHistory()
+func (state *state) StatusHistory() []*Status {
+	state.RLock()
+	defer state.RUnlock()
+
+	return append(state.history, state.status)
+}
+
+// UpdateStatus implements State.UpdateStatus()
+func (state *state) UpdateStatus(subStatus SubStatus) {
+	state.Lock()
+	defer state.Unlock()
+
+	status := NewStatus(subStatus)
+	state.history = append(state.history, state.status)
+	state.status = status
+
+	history := types.TaskHistory{CreatedAt: time.Now().UTC(), TaskID: state.taskID, Status: status.String()}
+	if state.statusLog.IsValid() {
+		history.Details = types.NewDetails(status.String(), state.statusLog)
+	}
+
+	if state.historyDBStore != nil {
+		if _, err := state.historyDBStore.InsertTaskHistory(history); err != nil {
+			log.WithError(err).Error("unable to store task status")
+		}
+	} else {
+		store, err := queries.OpenHistoryDB()
+		if err != nil {
+			log.WithError(err).Error("error opening history db")
+		}
+
+		if store != nil {
+			defer store.CloseHistoryDB(context.Background())
+			if _, err := store.InsertTaskHistory(history); err != nil {
+				log.WithError(err).Error("unable to store task status")
+			}
+		}
+	}
+
+	if state.notifyFn != nil {
+		state.notifyFn(status)
+	}
+
+	for _, subCh := range state.subsCh {
+		subCh := subCh
+		go func() {
+			subCh <- status
+		}()
+	}
+}
+
+// SubscribeStatus implements State.SubscribeStatus()
+func (state *state) SubscribeStatus() func() <-chan *Status {
+	state.RLock()
+	defer state.RUnlock()
+
+	subCh := make(chan *Status)
+	state.subsCh = append(state.subsCh, subCh)
+
+	for _, status := range append(state.history, state.status) {
+		status := status
+		go func() {
+			subCh <- status
+		}()
+	}
+
+	sub := func() <-chan *Status {
+		return subCh
+	}
+	return sub
+}
+
+func (state *state) SetStateLog(statusLog types.Fields) {
+	state.statusLog = statusLog
+}
+
+func (state *state) InitialiseHistoryDB(storeInterface queries.LocalStoreInterface) {
+	state.historyDBStore = storeInterface
+}
+
+// New returns a new state instance.
+func New(subStatus SubStatus, taskID string) State {
+	store, err := queries.OpenHistoryDB()
+	if err != nil {
+		log.WithError(err).Error("error opening history db")
+	}
+
+	if store != nil {
+		defer store.CloseHistoryDB(context.Background())
+
+		if _, err := store.InsertTaskHistory(types.TaskHistory{CreatedAt: time.Now().UTC(), TaskID: taskID,
+			Status: subStatus.String()}); err != nil {
+			log.WithError(err).Error("unable to store task status")
+		}
+	}
+
+	return &state{
+		status: NewStatus(subStatus),
+		taskID: taskID,
+	}
+}
diff --git a/pkg/common/task/state/status.go b/pkg/common/task/state/status.go
new file mode 100644
index 00000000..b1b00da6
--- /dev/null
+++ b/pkg/common/task/state/status.go
@@ -0,0 +1,34 @@
+//go:generate mockery --name=SubStatus
+
+package state
+
+import (
+	"fmt"
+	"time"
+)
+
+// SubStatus represents a sub-status that contains a description of the status.
+type SubStatus interface {
+	fmt.Stringer
+	IsFinal() bool
+	IsFailure() bool
+}
+
+// Status represents a state of the task.
+type Status struct {
+	CreatedAt time.Time
+	SubStatus
+}
+
+// Is returns true if the current `Status` matches the given `subStatus`.
+func (status *Status) Is(subStatus SubStatus) bool {
+	return status.SubStatus == subStatus
+}
+
+// NewStatus returns a new Status instance.
+func NewStatus(subStatus SubStatus) *Status {
+	return &Status{
+		CreatedAt: time.Now().UTC(),
+		SubStatus: subStatus,
+	}
+}
diff --git a/pkg/common/task/task.go b/pkg/common/task/task.go
new file mode 100644
index 00000000..88a64add
--- /dev/null
+++ b/pkg/common/task/task.go
@@ -0,0 +1,143 @@
+//go:generate mockery --name=Task
+
+package task
+
+import (
+	"context"
+	"sync"
+
+	"github.com/LumeraProtocol/supernode/pkg/common/task/state"
+	"github.com/LumeraProtocol/supernode/pkg/errgroup"
+	"github.com/LumeraProtocol/supernode/pkg/errors"
+	"github.com/LumeraProtocol/supernode/pkg/log"
+	"github.com/LumeraProtocol/supernode/pkg/random"
+)
+
+// Task represents a worker task.
+type Task interface {
+	state.State
+
+	// ID returns the id of the task.
+	ID() string
+
+	// Run starts the task.
+	Run(ctx context.Context) error
+
+	// Cancel tells a task to abandon its work.
+	// Cancel may be called by multiple goroutines simultaneously.
+	// After the first call, subsequent calls to Cancel do nothing.
+	Cancel()
+
+	// Done returns a channel that is closed when the task is canceled.
+	Done() <-chan struct{}
+
+	// RunAction waits for new actions and starts handling each of them in a new goroutine.
+	RunAction(ctx context.Context) error
+
+	// NewAction creates a new action and passes it on for execution.
+	// It is used when an action must run in the context of the `Task` rather than that of the caller.
+ NewAction(fn ActionFn) <-chan struct{} + + // CloseActionCh closes action ch + CloseActionCh() +} + +type task struct { + state.State + + id string + + actionCh chan *Action + + doneMu sync.Mutex + doneCh chan struct{} + closeOnce sync.Once +} + +// ID implements Task.ID +func (task *task) ID() string { + return task.id +} + +// Run implements Task.Run +func (task *task) Run(_ context.Context) error { + return errors.New("task default run func not implemented") +} + +// Cancel implements Task.Cancel +func (task *task) Cancel() { + task.doneMu.Lock() + defer task.doneMu.Unlock() + + select { + case <-task.Done(): + log.Debugf("task %s cancelled", task.ID()) + return + default: + close(task.doneCh) + } +} + +// Done implements Task.Done +func (task *task) Done() <-chan struct{} { + return task.doneCh +} + +// RunAction implements Task.RunAction +func (task *task) RunAction(ctx context.Context) error { + ctx, cancel := context.WithCancel(ctx) + defer cancel() + + group, ctx := errgroup.WithContext(ctx) + for { + select { + case <-ctx.Done(): + log.WithContext(ctx).WithField("task", task.ID()).Info("context done") + case <-task.Done(): + log.WithContext(ctx).Infof("task %s done", task.ID()) + cancel() + case action, ok := <-task.actionCh: + if !ok { + log.WithContext(ctx).Info("action channel closed") + return group.Wait() + } + + currAction := action + group.Go(func() error { + defer close(currAction.doneCh) + + return currAction.fn(ctx) + }) + continue + } + break + } + + return group.Wait() +} + +// CloseActionCh safely closes the action channel +func (task *task) CloseActionCh() { + task.closeOnce.Do(func() { + close(task.actionCh) + }) +} + +// NewAction implements Task.NewAction +func (task *task) NewAction(fn ActionFn) <-chan struct{} { + act := NewAction(fn) + task.actionCh <- act + return act.doneCh +} + +// New returns a new task instance. +func New(status state.SubStatus) Task { + taskID, _ := random.String(8, random.Base62Chars) + + return &task{ + State: state.New(status, taskID), + id: taskID, + doneCh: make(chan struct{}), + actionCh: make(chan *Action), + } +} diff --git a/pkg/common/task/ticket.go b/pkg/common/task/ticket.go new file mode 100644 index 00000000..561b8f0b --- /dev/null +++ b/pkg/common/task/ticket.go @@ -0,0 +1,13 @@ +package task + +type CascadeTicket struct { + Creator string `json:"creator"` + CreatorSignature []byte `json:"creator_signature"` + DataHash string `json:"data_hash"` + ActionID string `json:"action_id"` + BlockHeight int64 `json:"block_height"` + BlockHash []byte `json:"block_hash"` + RQIDsIC uint32 `json:"rqids_ic"` + RQIDsMax int32 `json:"rqids_max"` + RQIDs []string `json:"rq_ids"` +} diff --git a/pkg/common/task/worker.go b/pkg/common/task/worker.go new file mode 100644 index 00000000..724d74c5 --- /dev/null +++ b/pkg/common/task/worker.go @@ -0,0 +1,90 @@ +package task + +import ( + "context" + "sync" + + "github.com/LumeraProtocol/supernode/pkg/errgroup" + "github.com/LumeraProtocol/supernode/pkg/log" +) + +// Worker represents a pool of the task. +type Worker struct { + sync.Mutex + + tasks []Task + taskCh chan Task +} + +// Tasks returns all tasks. +func (worker *Worker) Tasks() []Task { + return worker.tasks +} + +// Task returns the task by the given id. +func (worker *Worker) Task(taskID string) Task { + worker.Lock() + defer worker.Unlock() + + for _, task := range worker.tasks { + if task.ID() == taskID { + return task + } + } + return nil +} + +// AddTask adds the new task. 
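+// AddTask sends on an unbuffered channel, so it blocks until a running
+// Worker.Run picks the task up. A typical (hypothetical) wiring:
+//
+//	worker := NewWorker()
+//	go worker.Run(ctx)
+//	worker.AddTask(t)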
+func (worker *Worker) AddTask(task Task) {
+	worker.Lock()
+	defer worker.Unlock()
+
+	worker.tasks = append(worker.tasks, task)
+	worker.taskCh <- task
+}
+
+// RemoveTask removes the task.
+func (worker *Worker) RemoveTask(subTask Task) {
+	worker.Lock()
+	defer worker.Unlock()
+
+	for i, task := range worker.tasks {
+		if task == subTask {
+			worker.tasks = append(worker.tasks[:i], worker.tasks[i+1:]...)
+			return
+		}
+	}
+}
+
+// Run waits for new tasks and starts handling each of them in a new goroutine.
+func (worker *Worker) Run(ctx context.Context) error {
+	// The parent ctx governs shutdown; the group's derived context is not needed here.
+	group, _ := errgroup.WithContext(ctx)
+	for {
+		select {
+		case <-ctx.Done():
+			log.WithContext(ctx).WithError(ctx.Err()).Warn("worker run stopping")
+			return group.Wait()
+		case t := <-worker.taskCh:
+			currentTask := t // capture the loop variable for the goroutine below
+			group.Go(func() error {
+				defer func() {
+					if r := recover(); r != nil {
+						log.WithContext(ctx).Errorf("Recovered from panic in common task's worker run: %v", r)
+					}
+
+					log.WithContext(ctx).WithField("task", currentTask.ID()).Info("Task Removed")
+					worker.RemoveTask(currentTask)
+				}()
+
+				return currentTask.Run(ctx)
+			})
+		}
+	}
+}
+
+// NewWorker returns a new Worker instance.
+func NewWorker() *Worker {
+	return &Worker{
+		taskCh: make(chan Task),
+	}
+}
diff --git a/pkg/common/task/worker_test.go b/pkg/common/task/worker_test.go
new file mode 100644
index 00000000..4c5f21ac
--- /dev/null
+++ b/pkg/common/task/worker_test.go
@@ -0,0 +1,147 @@
+package task
+
+import (
+	"testing"
+
+	"github.com/stretchr/testify/assert"
+)
+
+func TestWorkerTasks(t *testing.T) {
+	t.Parallel()
+
+	type fields struct {
+		tasks []Task
+	}
+	tests := []struct {
+		name   string
+		fields fields
+		want   []Task
+	}{
+		{
+			name: "retrieve tasks",
+			fields: fields{
+				tasks: []Task{&task{id: "1"}, &task{id: "2"}},
+			},
+			want: []Task{&task{id: "1"}, &task{id: "2"}},
+		},
+	}
+
+	for _, tt := range tests {
+		tt := tt
+
+		t.Run(tt.name, func(t *testing.T) {
+			worker := &Worker{
+				tasks: tt.fields.tasks,
+			}
+			assert.Equal(t, tt.want, worker.Tasks())
+		})
+	}
+}
+
+func TestWorkerTask(t *testing.T) {
+	t.Parallel()
+
+	type fields struct {
+		tasks []Task
+	}
+	type args struct {
+		taskID string
+	}
+	tests := []struct {
+		name   string
+		fields fields
+		args   args
+		want   Task
+	}{
+		{
+			name: "get task with id 1",
+			fields: fields{
+				tasks: []Task{&task{id: "1"}, &task{id: "2"}},
+			},
+			args: args{"2"},
+			want: &task{id: "2"},
+		},
+		{
+			name: "get not exist task",
+			fields: fields{
+				tasks: []Task{&task{id: "1"}, &task{id: "2"}},
+			},
+			args: args{"3"},
+			want: nil,
+		},
+	}
+
+	for _, tt := range tests {
+		t.Run(tt.name, func(t *testing.T) {
+			worker := &Worker{
+				tasks: tt.fields.tasks,
+			}
+			assert.Equal(t, tt.want, worker.Task(tt.args.taskID))
+		})
+	}
+}
+
+func TestWorkerAddTask(t *testing.T) {
+	t.Parallel()
+
+	type args struct {
+		task Task
+	}
+	tests := []struct {
+		name string
+		args args
+		want []Task
+	}{
+		{
+			name: "add task",
+			args: args{&task{id: "1"}},
+			want: []Task{&task{id: "1"}},
+		},
+	}
+	for _, tt := range tests {
+		t.Run(tt.name, func(t *testing.T) {
+			worker := &Worker{
+				taskCh: make(chan Task),
+			}
+
+			go func() {
+				worker.AddTask(tt.args.task)
+			}()
+
+			<-worker.taskCh
+			tasks := worker.tasks
+			assert.Equal(t, tt.want, tasks)
+
+		})
+	}
+}
+
+func TestWorkerRemoveTask(t *testing.T) {
+	t.Parallel()
+
+	type args struct {
+		subTask Task
+	}
+	tests := []struct {
+		name string
+		args args
+		want
[]Task + }{ + { + name: "removed task", + args: args{&task{id: "1"}}, + want: []Task{}, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + worker := &Worker{ + tasks: []Task{tt.args.subTask}, + } + + worker.RemoveTask(tt.args.subTask) + assert.Equal(t, tt.want, worker.tasks) + }) + } +} diff --git a/pkg/configurer/file.go b/pkg/configurer/file.go new file mode 100644 index 00000000..49fab2bc --- /dev/null +++ b/pkg/configurer/file.go @@ -0,0 +1,58 @@ +package configurer + +import ( + "path/filepath" + + "github.com/LumeraProtocol/supernode/pkg/errors" + "github.com/spf13/viper" +) + +// SetDefaultConfigPaths sets default paths for Viper to search for the config file in. +func SetDefaultConfigPaths(paths ...string) { + defaultConfigPaths = paths +} + +// ParseFile parses the config file from the given path `filename`, and assign it to the struct `config`. +func ParseFile(filename string, config interface{}) error { + var configType string + + switch filepath.Ext(filename) { + case ".conf": + configType = "env" + } + + return parseFile(filename, configType, config) +} + +// ParseJSONFile parses json config file from the given path `filename`, and assign it to the struct `config`. +func ParseJSONFile(filename string, config interface{}) error { + return parseFile(filename, "json", config) +} + +func parseFile(filename, configType string, config interface{}) error { + conf := viper.New() + + for _, configPath := range defaultConfigPaths { + conf.AddConfigPath(filepath.FromSlash(configPath)) + } + + if dir, _ := filepath.Split(filename); dir != "" { + conf.SetConfigFile(filename) + } else { + conf.SetConfigName(filename) + } + + if configType != "" { + conf.SetConfigType(configType) + } + + if err := conf.ReadInConfig(); err != nil { + return errors.Errorf("could not read config file: %w", err) + } + + if err := conf.Unmarshal(&config); err != nil { + return errors.Errorf("unable to decode into struct, %w", err) + } + + return nil +} diff --git a/pkg/configurer/path_darwin.go b/pkg/configurer/path_darwin.go new file mode 100644 index 00000000..3be74f09 --- /dev/null +++ b/pkg/configurer/path_darwin.go @@ -0,0 +1,20 @@ +//go:build darwin +// +build darwin + +package configurer + +import ( + "os" + "path/filepath" +) + +var defaultConfigPaths = []string{ + "$HOME/Library/Application Support/Pastel", + ".", +} + +// DefaultPath returns the default config path for darwin OS. +func DefaultPath() string { + homeDir, _ := os.UserConfigDir() + return filepath.Join(homeDir, "Pastel") +} diff --git a/pkg/configurer/path_linux.go b/pkg/configurer/path_linux.go new file mode 100644 index 00000000..efdb75da --- /dev/null +++ b/pkg/configurer/path_linux.go @@ -0,0 +1,20 @@ +//go:build linux +// +build linux + +package configurer + +import ( + "os" + "path/filepath" +) + +var defaultConfigPaths = []string{ + "$HOME/.pastel", + ".", +} + +// DefaultPath returns the default config path for Linux OS. 
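+// DefaultPath is typically handed to SetDefaultConfigPaths before the config
+// is parsed. A hedged sketch (Config and the filename are illustrative, and
+// file resolution follows viper's search rules):
+//
+//	configurer.SetDefaultConfigPaths(configurer.DefaultPath(), ".")
+//	var cfg Config
+//	if err := configurer.ParseFile("supernode.conf", &cfg); err != nil {
+//		// handle the error
+//	}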
+func DefaultPath() string { + homeDir, _ := os.UserHomeDir() + return filepath.Join(homeDir, ".pastel") +} diff --git a/pkg/configurer/path_windows.go b/pkg/configurer/path_windows.go new file mode 100644 index 00000000..9d313a6f --- /dev/null +++ b/pkg/configurer/path_windows.go @@ -0,0 +1,34 @@ +//go:build windows +// +build windows + +package configurer + +import ( + "os" + "path" + "path/filepath" + "syscall" +) + +const ( + beforeVistaAppDir = "Application Data" + sinceVistaAppDir = "AppData/Roaming" +) + +var defaultConfigPaths = []string{ + path.Join("$HOME", beforeVistaAppDir, "Pastel"), + path.Join("$HOME", sinceVistaAppDir, "Pastel"), + ".", +} + +// DefaultPath returns the default config path for Windows OS. +func DefaultPath() string { + homeDir, _ := os.UserHomeDir() + appDir := beforeVistaAppDir + + v, _ := syscall.GetVersion() + if v&0xff > 5 { + appDir = sinceVistaAppDir + } + return filepath.Join(homeDir, filepath.FromSlash(appDir), "Pastel") +} diff --git a/pkg/errgroup/errgroup.go b/pkg/errgroup/errgroup.go new file mode 100644 index 00000000..ca2b3fe8 --- /dev/null +++ b/pkg/errgroup/errgroup.go @@ -0,0 +1,37 @@ +package errgroup + +import ( + "context" + "runtime/debug" + + "github.com/LumeraProtocol/supernode/pkg/errors" + "github.com/LumeraProtocol/supernode/pkg/logtrace" + + "golang.org/x/sync/errgroup" +) + +// A Group is a collection of goroutines working on subtasks that are part of the same overall task. +type Group struct { + *errgroup.Group +} + +// Go calls the given function in a new goroutine and tries to recover from panics. +func (group *Group) Go(fn func() error) { + group.Group.Go(func() (err error) { + defer errors.Recover(func(recErr error) { + fields := logtrace.Fields{ + logtrace.FieldError: recErr.Error(), + logtrace.FieldStackTrace: debug.Stack(), + } + logtrace.Error(context.Background(), "errgroup panic", fields) + err = recErr + }) + return fn() + }) +} + +// WithContext returns a new Group and an associated Context derived from ctx. 
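+// Usage mirrors golang.org/x/sync/errgroup; the difference is that a panic
+// inside a Go callback is recovered and surfaced as the group error instead
+// of crashing the process. A minimal sketch (doWork is hypothetical):
+//
+//	group, gctx := errgroup.WithContext(ctx)
+//	group.Go(func() error { return doWork(gctx) })
+//	if err := group.Wait(); err != nil {
+//		// handle the first error or recovered panic
+//	}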
+func WithContext(ctx context.Context) (*Group, context.Context) { + group, ctx := errgroup.WithContext(ctx) + return &Group{group}, ctx +} diff --git a/pkg/logtrace/fields.go b/pkg/logtrace/fields.go index 9833af8c..9b28ac85 100644 --- a/pkg/logtrace/fields.go +++ b/pkg/logtrace/fields.go @@ -4,15 +4,18 @@ package logtrace type Fields map[string]interface{} const ( - FieldCorrelationID = "correlation_id" - FieldMethod = "method" - FieldModule = "module" - FieldError = "error" - FieldStatus = "status" - FieldBlockHeight = "block_height" - FieldLimit = "limit" - FieldSupernodeState = "supernode_state" - FieldRequest = "request" + FieldCorrelationID = "correlation_id" + FieldMethod = "method" + FieldModule = "module" + FieldError = "error" + FieldStatus = "status" + FieldBlockHeight = "block_height" + FieldLimit = "limit" + FieldSupernodeState = "supernode_state" + FieldRequest = "request" + FieldSupernodeAccountAddress = "supernode_account_address" + FieldIsPrimary = "is_primary" + FieldStackTrace = "stack_trace" ValueLumeraSDK = "lumera-sdk" ValueActionSDK = "action-sdk" diff --git a/pkg/lumera/client.go b/pkg/lumera/client.go index edccfb54..a2cc142a 100644 --- a/pkg/lumera/client.go +++ b/pkg/lumera/client.go @@ -53,7 +53,7 @@ func newClient(ctx context.Context, opts ...Option) (Client, error) { return nil, err } - nodeModule, err := node.NewModule(conn.GetConn()) + nodeModule, err := node.NewModule(conn.GetConn(), cfg.keyring) if err != nil { conn.Close() return nil, err diff --git a/pkg/lumera/config.go b/pkg/lumera/config.go index 6a370bf0..9c9208bc 100644 --- a/pkg/lumera/config.go +++ b/pkg/lumera/config.go @@ -1,5 +1,7 @@ package lumera +import "github.com/cosmos/cosmos-sdk/crypto/keyring" + // Config holds all the configuration needed for the client type Config struct { // GRPCAddr is the gRPC endpoint address @@ -10,6 +12,9 @@ type Config struct { // Timeout is the default request timeout in seconds Timeout int + + // keyring is the keyring conf for the node sign & verify + keyring keyring.Keyring } // DefaultConfig returns a default configuration diff --git a/pkg/lumera/interface.go b/pkg/lumera/interface.go index 1e1d8737..06be0759 100644 --- a/pkg/lumera/interface.go +++ b/pkg/lumera/interface.go @@ -11,7 +11,6 @@ import ( // Client defines the main interface for interacting with Lumera blockchain type Client interface { - // Module accessors Action() action.Module SuperNode() supernode.Module Tx() tx.Module diff --git a/pkg/lumera/modules/node/impl.go b/pkg/lumera/modules/node/impl.go index e1d9deea..c32bffeb 100644 --- a/pkg/lumera/modules/node/impl.go +++ b/pkg/lumera/modules/node/impl.go @@ -3,6 +3,9 @@ package node import ( "context" "fmt" + "github.com/cosmos/cosmos-sdk/crypto/keyring" + "github.com/cosmos/cosmos-sdk/types" + signingtypes "github.com/cosmos/cosmos-sdk/types/tx/signing" cmtservice "github.com/cosmos/cosmos-sdk/client/grpc/cmtservice" "google.golang.org/grpc" @@ -10,17 +13,19 @@ import ( // module implements the Module interface type module struct { + kr keyring.Keyring client cmtservice.ServiceClient } // newModule creates a new Node module client -func newModule(conn *grpc.ClientConn) (Module, error) { +func newModule(conn *grpc.ClientConn, keyring keyring.Keyring) (Module, error) { if conn == nil { return nil, fmt.Errorf("connection cannot be nil") } return &module{ client: cmtservice.NewServiceClient(conn), + kr: keyring, }, nil } @@ -87,3 +92,45 @@ func (m *module) GetValidatorSetByHeight(ctx context.Context, height int64) (*cm return resp, nil } + 
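+// Sign and Verify below round-trip through the keyring supplied via
+// lumera.WithKeyring. A hedged caller sketch (mod, addr and payload are
+// illustrative):
+//
+//	sig, err := mod.Sign(addr, payload)
+//	if err == nil {
+//		err = mod.Verify(addr, payload, sig)
+//	}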
+func (m *module) Sign(snAccAddress string, data []byte) (signature []byte, err error) {
+	accAddr, err := types.AccAddressFromBech32(snAccAddress)
+	if err != nil {
+		return signature, fmt.Errorf("invalid address: %w", err)
+	}
+
+	_, err = m.kr.KeyByAddress(accAddr)
+	if err != nil {
+		return signature, fmt.Errorf("address not found in keyring: %w", err)
+	}
+
+	signature, _, err = m.kr.SignByAddress(accAddr, data, signingtypes.SignMode_SIGN_MODE_DIRECT)
+	if err != nil {
+		return nil, fmt.Errorf("failed to sign data: %w", err)
+	}
+
+	return signature, nil
+}
+
+func (m *module) Verify(accAddress string, data, signature []byte) (err error) {
+	addr, err := types.AccAddressFromBech32(accAddress)
+	if err != nil {
+		return fmt.Errorf("invalid address: %w", err)
+	}
+
+	keyInfo, err := m.kr.KeyByAddress(addr)
+	if err != nil {
+		return fmt.Errorf("address not found in keyring: %w", err)
+	}
+
+	pubKey, err := keyInfo.GetPubKey()
+	if err != nil {
+		return fmt.Errorf("failed to get public key: %w", err)
+	}
+
+	if !pubKey.VerifySignature(data, signature) {
+		return fmt.Errorf("invalid signature")
+	}
+
+	return nil
+}
diff --git a/pkg/lumera/modules/node/interface.go b/pkg/lumera/modules/node/interface.go
index 0694e2af..60ef53c3 100644
--- a/pkg/lumera/modules/node/interface.go
+++ b/pkg/lumera/modules/node/interface.go
@@ -2,6 +2,7 @@ package node
 
 import (
 	"context"
+	"github.com/cosmos/cosmos-sdk/crypto/keyring"
 
 	cmtservice "github.com/cosmos/cosmos-sdk/client/grpc/cmtservice"
 	"google.golang.org/grpc"
@@ -26,9 +27,15 @@ type Module interface {
 
 	// GetValidatorSetByHeight gets the validator set at a specific height
 	GetValidatorSetByHeight(ctx context.Context, height int64) (*cmtservice.GetValidatorSetByHeightResponse, error)
+
+	// Sign signs the given bytes with the supernode account address key and returns the signature
+	Sign(snAccAddress string, data []byte) (signature []byte, err error)
+
+	// Verify verifies the given bytes against the given supernode account address's public key and returns an error on mismatch
+	Verify(accAddress string, data, signature []byte) (err error)
 }
 
 // NewModule creates a new Node module client
-func NewModule(conn *grpc.ClientConn) (Module, error) {
-	return newModule(conn)
+func NewModule(conn *grpc.ClientConn, kr keyring.Keyring) (Module, error) {
+	return newModule(conn, kr)
 }
diff --git a/pkg/lumera/modules/supernode/impl.go b/pkg/lumera/modules/supernode/impl.go
index b31d52c6..cc12f430 100644
--- a/pkg/lumera/modules/supernode/impl.go
+++ b/pkg/lumera/modules/supernode/impl.go
@@ -3,9 +3,12 @@ package supernode
 import (
 	"context"
 	"fmt"
+	"github.com/LumeraProtocol/supernode/pkg/errors"
 
 	"github.com/LumeraProtocol/lumera/x/supernode/types"
+	"google.golang.org/grpc"
+	"sort"
 )
 
 // module implements the Module interface
@@ -47,3 +50,34 @@ func (m *module) GetSuperNode(ctx context.Context, address string) (*types.Query
 
 	return resp, nil
 }
+
+func (m *module) GetSupernodeBySupernodeAddress(ctx context.Context, address string) (*types.SuperNode, error) {
+	resp, err := m.client.GetSuperNodeBySuperNodeAddress(ctx, &types.QueryGetSuperNodeBySuperNodeAddressRequest{
+		SupernodeAddress: address,
+	})
+	if err != nil {
+		return nil, fmt.Errorf("failed to get supernode: %w", err)
+	}
+
+	return resp.Supernode, nil
+}
+
+func Exists(nodes []*types.SuperNode, snAccAddress string) bool {
+	for _, sn := range nodes {
+		if sn.SupernodeAccount == snAccAddress {
+			return true
+		}
+	}
+	return false
+}
+
+func GetLatestIP(supernode *types.SuperNode) (string, error) {
+	if len(supernode.PrevIpAddresses) == 0 {
return "", errors.Errorf("no ip history exists for the supernode") + } + sort.Slice(supernode.PrevIpAddresses, func(i, j int) bool { + return supernode.PrevIpAddresses[i].GetHeight() > supernode.PrevIpAddresses[j].GetHeight() + }) + + return supernode.PrevIpAddresses[0].Address, nil +} diff --git a/pkg/lumera/modules/supernode/interface.go b/pkg/lumera/modules/supernode/interface.go index 37e57b12..ed830f36 100644 --- a/pkg/lumera/modules/supernode/interface.go +++ b/pkg/lumera/modules/supernode/interface.go @@ -11,6 +11,7 @@ import ( type Module interface { GetTopSuperNodesForBlock(ctx context.Context, blockHeight uint64) (*types.QueryGetTopSuperNodesForBlockResponse, error) GetSuperNode(ctx context.Context, address string) (*types.QueryGetSuperNodeResponse, error) + GetSupernodeBySupernodeAddress(ctx context.Context, address string) (*types.SuperNode, error) } // NewModule creates a new SuperNode module client diff --git a/pkg/lumera/options.go b/pkg/lumera/options.go index 862194ac..7bc5220e 100644 --- a/pkg/lumera/options.go +++ b/pkg/lumera/options.go @@ -1,5 +1,7 @@ package lumera +import "github.com/cosmos/cosmos-sdk/crypto/keyring" + // Option is a function that applies a change to Config type Option func(*Config) @@ -23,3 +25,10 @@ func WithTimeout(seconds int) Option { c.Timeout = seconds } } + +// WithKeyring sets the keyring conf for the node +func WithKeyring(k keyring.Keyring) Option { + return func(c *Config) { + c.keyring = k + } +} diff --git a/pkg/raptorq/config.go b/pkg/raptorq/config.go index 92203643..ad6d0dd3 100644 --- a/pkg/raptorq/config.go +++ b/pkg/raptorq/config.go @@ -15,6 +15,8 @@ type Config struct { // the queries port to listen for connections on Port int `mapstructure:"port" json:"port,omitempty"` + + RqFilesDir string `mapstructure:"rqfiles_dir" json:"rqfiles_dir,omitempty"` } // NewConfig returns a new Config instance. 
diff --git a/pkg/raptorq/gen_rq_identifier_files.go b/pkg/raptorq/gen_rq_identifier_files.go
new file mode 100644
index 00000000..3b851856
--- /dev/null
+++ b/pkg/raptorq/gen_rq_identifier_files.go
@@ -0,0 +1,38 @@
+package raptorq
+
+import (
+	"context"
+
+	"github.com/LumeraProtocol/supernode/pkg/logtrace"
+
+	"google.golang.org/grpc/codes"
+	"google.golang.org/grpc/status"
+)
+
+func (s *raptorQServerClient) GenRQIdentifiersFiles(ctx context.Context, fields logtrace.Fields, data []byte, operationBlockHash string, pastelID string, rqMax uint32) (RQIDsIc uint32, RQIDs []string, RQIDsFile []byte, RQEncodeParams EncoderParameters, signature []byte, err error) {
+	encodeInfo, err := s.encodeInfo(ctx, data, rqMax, operationBlockHash, pastelID)
+	if err != nil {
+		return RQIDsIc, RQIDs, RQIDsFile, RQEncodeParams, signature, status.Errorf(codes.Internal, "generate RaptorQ symbols identifiers")
+	}
+
+	var rqIDsFilesCount uint32
+	for i := range encodeInfo.SymbolIDFiles {
+		if len(encodeInfo.SymbolIDFiles[i].SymbolIdentifiers) == 0 {
+			return RQIDsIc, RQIDs, RQIDsFile, RQEncodeParams, signature, status.Errorf(codes.Internal, "empty symbol identifiers - rawFile")
+		}
+
+		// Assign to the named return values; a short variable declaration here would shadow them.
+		RQIDsIc, RQIDs, RQIDsFile, signature, err = s.generateRQIDs(ctx, encodeInfo.SymbolIDFiles[i], pastelID, rqMax)
+		if err != nil {
+			return RQIDsIc, RQIDs, RQIDsFile, RQEncodeParams, signature, status.Errorf(codes.Internal, "create RQIDs file")
+		}
+		rqIDsFilesCount++
+		// Identifiers are generated from the first symbol-ID file only;
+		// encodeInfo has already verified that exactly rqMax files exist.
+		break
+	}
+	if rqIDsFilesCount == 0 {
+		return RQIDsIc, RQIDs, RQIDsFile, RQEncodeParams, signature, status.Errorf(codes.Internal, "no RaptorQ symbol identifiers files were produced, most probably old version of rq-services is installed")
+	}
+
+	RQEncodeParams = encodeInfo.EncoderParam
+
+	return RQIDsIc, RQIDs, RQIDsFile, RQEncodeParams, signature, nil
+}
diff --git a/pkg/raptorq/helper.go b/pkg/raptorq/helper.go
new file mode 100644
index 00000000..3d8f95dc
--- /dev/null
+++ b/pkg/raptorq/helper.go
@@ -0,0 +1,158 @@
+package raptorq
+
+import (
+	"bytes"
+	"context"
+	"encoding/json"
+	"math/rand/v2"
+	"os"
+	"strconv"
+
+	"github.com/LumeraProtocol/supernode/pkg/errors"
+	"github.com/LumeraProtocol/supernode/pkg/logtrace"
+	"github.com/LumeraProtocol/supernode/pkg/utils"
+	"github.com/cosmos/btcutil/base58"
+)
+
+const (
+	InputEncodeFileName = "input.data"
+	SeparatorByte       byte = 46 // separator in dd_and_fingerprints.signature i.e. '.'
+)
+
+// EncoderParameters represents the encoding params used by the raptorq services
+type EncoderParameters struct {
+	Oti []byte
+}
+
+// EncodeInfo represents the response returned by the encodeInfo method
+type EncodeInfo struct {
+	SymbolIDFiles map[string]RawSymbolIDFile
+	EncoderParam  EncoderParameters
+}
+
+// Encode represents the response returned by the Encode method
+type Encode struct {
+	Symbols      map[string][]byte
+	EncoderParam EncoderParameters
+}
+
+// Decode represents the response returned by the Decode method
+type Decode struct {
+	File []byte
+}
+
+func (s *raptorQServerClient) encodeInfo(ctx context.Context, data []byte, copies uint32, blockHash string, pastelID string) (*EncodeInfo, error) {
+	s.semaphore <- struct{}{} // Acquire a slot
+	defer func() {
+		<-s.semaphore // Release the semaphore slot
+	}()
+
+	if data == nil {
+		return nil, errors.Errorf("invalid data")
+	}
+
+	_, inputPath, err := createInputEncodeFile(s.config.RqFilesDir, data)
+	if err != nil {
+		return nil, errors.Errorf("create input file: %w", err)
+	}
+	res, err := s.EncodeMetaData(ctx, EncodeMetadataRequest{
+		FilesNumber: copies,
+		BlockHash:   blockHash,
+		PastelId:    pastelID,
+		Path:        inputPath,
+	})
+	if err != nil {
+		return nil, errors.Errorf("encode metadata %s: %w", res.Path, err)
+	}
+
+	filesMap, err := scanSymbolIDFiles(res.Path)
+	if err != nil {
+		return nil, errors.Errorf("scan symbol id files folder %s: %w", res.Path, err)
+	}
+
+	if len(filesMap) != int(copies) {
+		return nil, errors.Errorf("symbol id files count does not match: expected %d, got %d", copies, len(filesMap))
+	}
+
+	output := &EncodeInfo{
+		SymbolIDFiles: filesMap,
+		EncoderParam: EncoderParameters{
+			Oti: res.EncoderParameters,
+		},
+	}
+
+	if err := os.Remove(inputPath); err != nil {
+		logtrace.Error(ctx, "encode info: error removing input file", logtrace.Fields{"Path": inputPath})
+	}
+
+	return output, nil
+}
+
+func (s *raptorQServerClient) generateRQIDs(ctx context.Context, rawFile RawSymbolIDFile, snAccAddress string, maxFiles uint32) (RQIDsIc uint32, RQIDs []string, RQIDsFile []byte, signature []byte, err error) {
+	rqIDsfile, err := json.Marshal(rawFile)
+	if err != nil {
+		return RQIDsIc, RQIDs, RQIDsFile, signature, errors.Errorf("marshal rqID file: %w", err)
+	}
+
+	// FIXME : msgs param
+	signature, err = s.lumeraClient.Node().Sign(snAccAddress, rqIDsfile) // FIXME : confirm the data
+	if err != nil {
+		return RQIDsIc, RQIDs, RQIDsFile, signature, errors.Errorf("sign identifiers file: %w", err)
+	}
+
+	encRqIDsfile := utils.B64Encode(rqIDsfile)
+
+	var buffer bytes.Buffer
+	buffer.Write(encRqIDsfile)
+	buffer.WriteString(".")
+	buffer.Write(signature)
+	rqIDFile := buffer.Bytes()
+
+	RQIDsIc = rand.Uint32()
+	RQIDs, _, err = GetIDFiles(ctx, rqIDFile, RQIDsIc, maxFiles)
+	if err != nil {
+		return RQIDsIc, RQIDs, RQIDsFile, signature, errors.Errorf("get ID Files: %w", err)
+	}
+
+	comp, err := utils.HighCompress(ctx, rqIDFile)
+	if err != nil {
+		return RQIDsIc, RQIDs, RQIDsFile, signature, errors.Errorf("compress: %w", err)
+	}
+	RQIDsFile = utils.B64Encode(comp)
+
+	return RQIDsIc, RQIDs, RQIDsFile, signature, nil
+}
+
+// GetIDFiles generates ID files for dd_and_fingerprints files and rq_id files.
+// file is the b64-encoded file appended with signatures and compressed, ic is
+// the initial counter, and max is the number of ids to generate
+func GetIDFiles(ctx context.Context, file []byte, ic uint32, max uint32) (ids []string, files [][]byte, err error) {
+	idFiles := make([][]byte, 0, max)
+	ids = make([]string, 0, max)
+	var buffer
bytes.Buffer + + for i := uint32(0); i < max; i++ { + buffer.Reset() + counter := ic + i + + buffer.Write(file) + buffer.WriteByte(SeparatorByte) + buffer.WriteString(strconv.Itoa(int(counter))) // Using the string representation to maintain backward compatibility + + compressedData, err := utils.HighCompress(ctx, buffer.Bytes()) // Ensure you're using the same compression level + if err != nil { + return ids, idFiles, errors.Errorf("compress identifiers file: %w", err) + } + + idFiles = append(idFiles, compressedData) + + hash, err := utils.Sha3256hash(compressedData) + if err != nil { + return ids, idFiles, errors.Errorf("sha3-256-hash error getting an id file: %w", err) + } + + ids = append(ids, base58.Encode(hash)) + } + + return ids, idFiles, nil +} diff --git a/pkg/raptorq/interfaces.go b/pkg/raptorq/interfaces.go index 4993d2e5..b6862e87 100644 --- a/pkg/raptorq/interfaces.go +++ b/pkg/raptorq/interfaces.go @@ -4,6 +4,8 @@ package raptorq import ( "context" + + "github.com/LumeraProtocol/supernode/pkg/logtrace" ) // ClientInterface represents a base connection interface. @@ -33,4 +35,6 @@ type RaptorQ interface { Decode(ctx context.Context, req DecodeRequest) (DecodeResponse, error) // EncodeMetaData Get encode info(include encode parameters + symbol id files) EncodeMetaData(ctx context.Context, req EncodeMetadataRequest) (EncodeResponse, error) + // GenRQIdentifiersFiles generates the RQ identifier files + GenRQIdentifiersFiles(ctx context.Context, fields logtrace.Fields, data []byte, operationBlockHash string, pastelID string, rqMax uint32) (RQIDsIc uint32, RQIDs []string, RQIDsFile []byte, RQEncodeParams EncoderParameters, signature []byte, err error) } diff --git a/pkg/raptorq/rq_server_client.go b/pkg/raptorq/rq_server_client.go index 1d0341ab..877fc7f5 100644 --- a/pkg/raptorq/rq_server_client.go +++ b/pkg/raptorq/rq_server_client.go @@ -1,6 +1,7 @@ package raptorq import ( + "github.com/LumeraProtocol/supernode/pkg/lumera" "time" rq "github.com/LumeraProtocol/supernode/gen/raptorq" @@ -12,10 +13,11 @@ const ( ) type raptorQServerClient struct { - config *Config - conn *clientConn - rqService rq.RaptorQClient - semaphore chan struct{} // Semaphore to control concurrency + config *Config + conn *clientConn + rqService rq.RaptorQClient + lumeraClient lumera.Client + semaphore chan struct{} // Semaphore to control concurrency } func newRaptorQServerClient(conn *clientConn, config *Config) RaptorQ { diff --git a/pkg/storage/file_storage_interface.go b/pkg/storage/file_storage_interface.go new file mode 100644 index 00000000..faa1b0ca --- /dev/null +++ b/pkg/storage/file_storage_interface.go @@ -0,0 +1,45 @@ +//go:generate mockery --name=FileStorageInterface +//go:generate mockery --name=FileInterface + +package storage + +import ( + "io" + + "github.com/LumeraProtocol/supernode/pkg/errors" +) + +var ( + // ErrFileNotFound is returned when file isn't found. + ErrFileNotFound = errors.New("file not found") + // ErrFileExists is returned when file already exists. + ErrFileExists = errors.New("file exists") +) + +// FileStorageInterface represents a file storage. +type FileStorageInterface interface { + // Open opens a file and returns file descriptor. + // If name is not found, ErrFileNotFound is returned. + Open(name string) (file FileInterface, err error) + + // Create creates a new file with the given name and returns file descriptor. + Create(name string) (file FileInterface, err error) + + // Remove removes a file by the given name. 
+ Remove(name string) error + + // Rename renames oldname to newname. + Rename(oldname, newname string) error +} + +// FileInterface represents a file. +type FileInterface interface { + io.Closer + io.Reader + io.ReaderAt + io.Seeker + io.Writer + io.WriterAt + + Name() string +} diff --git a/pkg/storage/files/file.go b/pkg/storage/files/file.go new file mode 100644 index 00000000..d304abe5 --- /dev/null +++ b/pkg/storage/files/file.go @@ -0,0 +1,382 @@ +package files + +import ( + "bytes" + "fmt" + "image" + "image/gif" + "image/jpeg" + "image/png" + "io" + "path/filepath" + "strings" + "sync" + "time" + + "github.com/LumeraProtocol/supernode/pkg/errors" + "github.com/LumeraProtocol/supernode/pkg/log" + "github.com/LumeraProtocol/supernode/pkg/storage" + + "github.com/disintegration/imaging" + "github.com/kolesa-team/go-webp/decoder" + "github.com/kolesa-team/go-webp/encoder" + "github.com/kolesa-team/go-webp/webp" +) + +// File represents a file. +type File struct { + fmt.Stringer + sync.Mutex + + storage.FileInterface + storage *Storage + + // if a file was created during the process, it should be deleted at the end. + isCreated bool + + // unique name within the storage. + name string + + // file format, png, jpg, etc. + format Format +} + +// Name returns filename. +func (file *File) Name() string { + return file.name +} + +func (file *File) String() string { + return file.name +} + +// SetFormatFromExtension parses and sets image format from filename extension: +// "jpg" (or "jpeg"), "png", "gif" are supported. +func (file *File) SetFormatFromExtension(ext string) error { + if format, ok := formatExts[strings.ToLower(strings.TrimPrefix(ext, "."))]; ok { + return file.SetFormat(format) + } + return ErrUnsupportedFormat +} + +// SetFormat sets file extension. +func (file *File) SetFormat(format Format) error { + file.format = format + + newname := fmt.Sprintf("%s.%s", strings.TrimSuffix(file.name, filepath.Ext(file.name)), format) + oldname := file.name + file.name = newname + + if err := file.storage.Update(oldname, newname, file); err != nil { + return err + } + + if !file.isCreated { + return nil + } + return file.storage.Rename(oldname, newname) +} + +// Format returns file extension. +func (file *File) Format() Format { + return file.format +} + +// Open opens a file and returns file descriptor. +// If file is not found, storage.ErrFileNotFound is returned. +func (file *File) Open() (storage.FileInterface, error) { + file.Lock() + defer file.Unlock() + + return file.storage.Open(file.Name()) +} + +// Create creates a file and returns file descriptor. +func (file *File) Create() (storage.FileInterface, error) { + file.Lock() + defer file.Unlock() + + fl, err := file.storage.Create(file.name) + if err != nil { + return nil, err + } + + file.isCreated = true + return fl, nil +} + +// Remove removes the file. +func (file *File) Remove() error { + file.Lock() + defer file.Unlock() + + delete(file.storage.filesMap, file.name) + + if !file.isCreated { + return nil + } + file.isCreated = false + + return file.storage.Remove(file.name) +} + +// Copy creates a copy of the current file. 
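+// A caller sketch (file is an existing *File registered in the same storage;
+// the duplicate shares its format and lives in the same storage):
+//
+//	dup, err := file.Copy()
+//	if err == nil {
+//		defer dup.Remove()
+//	}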
+func (file *File) Copy() (*File, error) { + src, err := file.Open() + if err != nil { + return nil, err + } + defer src.Close() + + newFile := file.storage.NewFile() + if err := newFile.SetFormat(file.format); err != nil { + return nil, err + } + + dst, err := newFile.Create() + if err != nil { + return nil, err + } + defer dst.Close() + + if _, err := io.Copy(dst, src); err != nil { + return nil, errors.Errorf("copy file: %w", err) + } + return newFile, nil +} + +// Bytes returns the contents of the file by bytes. +func (file *File) Bytes() ([]byte, error) { + f, err := file.Open() + if err != nil { + return nil, err + } + defer f.Close() + + buf := new(bytes.Buffer) + if _, err := buf.ReadFrom(f); err != nil { + return nil, errors.Errorf("read file: %w", err) + } + + return buf.Bytes(), nil +} + +// Write writes data to the file. +func (file *File) Write(data []byte) (n int, err error) { + f, err := file.Create() + if err != nil { + return 0, errors.Errorf("create file: %w", err) + } + defer f.Close() + + n, err = f.Write(data) + if err != nil { + return n, errors.Errorf("write file: %w", err) + } + + return +} + +// ResizeImage resizes image. +func (file *File) ResizeImage(width, height int) error { + src, err := file.LoadImage() + if err != nil { + return err + } + + dst := imaging.Resize(src, width, height, imaging.Lanczos) + + return file.SaveImage(dst) +} + +// RemoveAfter removes the file after the specified duration. +func (file *File) RemoveAfter(d time.Duration) { + go func() { + time.AfterFunc(d, func() { file.Remove() }) + }() +} + +// LoadImage opens images from the file. +func (file *File) LoadImage() (image.Image, error) { + f, err := file.Open() + if err != nil { + return nil, err + } + defer f.Close() + + img, _, err := image.Decode(f) + if err != nil { + // Reset the reader to the beginning of the file + _, errSeek := f.Seek(0, io.SeekStart) + if errSeek != nil { + return nil, errors.Errorf("reset file reader: %w", errSeek) + } + + var errWebp error + img, errWebp = webp.Decode(f, &decoder.Options{}) + if errWebp != nil { + return nil, errors.Errorf("decode image(%s) - %w - tried webp as well: %w", f.Name(), err, errWebp) + } + } + + return img, nil +} + +// SaveImage saves image to the file. 
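+// The encoder is selected from file.Format(): JPEG, PNG, GIF and WEBP are
+// handled; any other format returns ErrUnsupportedFormat. A sketch:
+//
+//	if err := file.SetFormat(PNG); err == nil {
+//		err = file.SaveImage(img)
+//	}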
+func (file *File) SaveImage(img image.Image) error { + f, err := file.Create() + if err != nil { + return err + } + defer f.Close() + + switch file.format { + case JPEG: + if nrgba, ok := img.(*image.NRGBA); ok && nrgba.Opaque() { + rgba := &image.RGBA{ + Pix: nrgba.Pix, + Stride: nrgba.Stride, + Rect: nrgba.Rect, + } + if err := jpeg.Encode(f, rgba, nil); err != nil { + return errors.Errorf("encode jpeg rgba(%s): %w", f.Name(), err) + } + return nil + } + if err := jpeg.Encode(f, img, nil); err != nil { + return errors.Errorf("encode jpeg(%s): %w", f.Name(), err) + } + return nil + + case PNG: + encoder := png.Encoder{CompressionLevel: png.DefaultCompression} + if err := encoder.Encode(f, img); err != nil { + return errors.Errorf("encode png(%s): %w", f.Name(), err) + } + return nil + + case GIF: + if err := gif.Encode(f, img, nil); err != nil { + return errors.Errorf("encode gif(%s): %w", f.Name(), err) + } + return nil + case WEBP: + opts, err := encoder.NewLosslessEncoderOptions(encoder.PresetDefault, 0) + if err != nil { + return errors.Errorf("create lossless encoder option %w", err) + } + if err := webp.Encode(f, img, opts); err != nil { + return errors.Errorf("encode webp(%s): %w", f.Name(), err) + } + return nil + + } + + return ErrUnsupportedFormat +} + +// Thumbnail creates a thumbnail file from the NFT file and store in to storage layer +func (file *File) Thumbnail(coordinate ThumbnailCoordinate) (*File, error) { + f := NewFile(file.storage, "thumbnail-of-"+file.name) + if f == nil { + return nil, errors.Errorf("create new file for thumbnail-of-%q", file.Name()) + } + if err := f.SetFormat(file.Format()); err != nil { + return nil, errors.Errorf("set format for thumbnail-of-%q", file.Name()) + } + + img, err := file.LoadImage() + if err != nil { + return nil, errors.Errorf("load image from file(%s): %w", file.Name(), err) + } + + rect := image.Rect(int(coordinate.TopLeftX), int(coordinate.TopLeftY), int(coordinate.BottomRightX), int(coordinate.BottomRightY)) + thumbnail := imaging.Crop(img, rect) + if thumbnail == nil { + return nil, errors.Errorf("generate thumbnail(%s): %w", file.Name(), err) + } + + if err := f.SaveImage(thumbnail); err != nil { + return nil, errors.Errorf("save thumbnail(%s): %w", file.Name(), err) + } + + return f, nil +} + +// UpdateFormat updates file format +func (file *File) UpdateFormat() error { + f, err := file.Open() + if err != nil { + return err + } + defer f.Close() + + // Try decoding with the standard library first + _, format, err := image.Decode(f) + if err != nil { + // If standard decoding fails, reset the reader and try WebP decoding + _, errSeek := f.Seek(0, io.SeekStart) + if errSeek != nil { + return errors.Errorf("reset file reader: %w", errSeek) + } + + _, errWebp := webp.Decode(f, &decoder.Options{}) + if errWebp != nil { + return errors.Errorf("decode image(%s) in updateFormat - tried webp as well: %w", f.Name(), errWebp) + } + format = "webp" + } + + err = file.SetFormatFromExtension(format) + if err != nil { + log.WithError(err).Error(fmt.Sprintf("not able to set extension:%s", err.Error())) + return errors.Errorf("set file format(%s): %w", file.Name(), err) + } + + return nil +} + +// Encoder represents an image encoder. +type Encoder interface { + Encode(img image.Image) (image.Image, error) +} + +// Encode encodes the image by the given encoder. 
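+// An illustrative pass-through Encoder (not part of this package):
+//
+//	type nopEncoder struct{}
+//
+//	func (nopEncoder) Encode(img image.Image) (image.Image, error) {
+//		return img, nil
+//	}
+//
+//	err := file.Encode(nopEncoder{})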
+func (file *File) Encode(enc Encoder) error {
+	img, err := file.LoadImage()
+	if err != nil {
+		return fmt.Errorf("load image: %w", err)
+	}
+
+	encImg, err := enc.Encode(img)
+	if err != nil {
+		return fmt.Errorf("common encode image: %w", err)
+	}
+	return file.SaveImage(encImg)
+}
+
+// Decoder represents an image decoder.
+type Decoder interface {
+	Decode(img image.Image) error
+}
+
+// Decode decodes the image by the given decoder.
+func (file *File) Decode(dec Decoder) error {
+	img, err := file.LoadImage()
+	if err != nil {
+		return err
+	}
+	if err := dec.Decode(img); err != nil {
+		return fmt.Errorf("common decode image: %w", err)
+	}
+
+	return nil
+}
+
+// NewFile returns a new File instance.
+func NewFile(storage *Storage, name string) *File {
+	return &File{
+		storage: storage,
+		name:    name,
+	}
+}
diff --git a/pkg/storage/files/format.go b/pkg/storage/files/format.go
new file mode 100644
index 00000000..5a54fc23
--- /dev/null
+++ b/pkg/storage/files/format.go
@@ -0,0 +1,36 @@
+package files
+
+import (
+	"github.com/LumeraProtocol/supernode/pkg/errors"
+)
+
+// ErrUnsupportedFormat means the given image format is not supported.
+var ErrUnsupportedFormat = errors.New("imaging: unsupported image format")
+
+// Image file formats.
+const (
+	JPEG Format = iota
+	PNG
+	GIF
+	WEBP
+)
+
+// "gif" is included so that SetFormatFromExtension accepts every format
+// SaveImage can encode.
+var formatExts = map[string]Format{
+	"jpg":  JPEG,
+	"jpeg": JPEG,
+	"png":  PNG,
+	"gif":  GIF,
+	"webp": WEBP,
+}
+
+var formatNames = map[Format]string{
+	JPEG: "jpeg",
+	PNG:  "png",
+	GIF:  "gif",
+	WEBP: "webp",
+}
+
+// Format is an image file format.
+type Format int
+
+func (f Format) String() string {
+	return formatNames[f]
+}
diff --git a/pkg/storage/files/storage.go b/pkg/storage/files/storage.go
new file mode 100644
index 00000000..9ce1d4e7
--- /dev/null
+++ b/pkg/storage/files/storage.go
@@ -0,0 +1,82 @@
+package files
+
+import (
+	"context"
+	"fmt"
+	"sync/atomic"
+
+	"github.com/LumeraProtocol/supernode/pkg/errors"
+	"github.com/LumeraProtocol/supernode/pkg/random"
+	"github.com/LumeraProtocol/supernode/pkg/storage"
+)
+
+// Storage represents a file storage.
+type Storage struct {
+	storage.FileStorageInterface
+
+	idCounter int64
+	prefix    string
+	filesMap  map[string]*File
+}
+
+// Run removes all files when the context is canceled.
+func (storage *Storage) Run(ctx context.Context) error {
+	<-ctx.Done()
+
+	var errs error
+	for _, file := range storage.filesMap {
+		if err := file.Remove(); err != nil {
+			errs = errors.Append(errs, err)
+		}
+	}
+
+	return errs
+}
+
+// NewFile returns a new File instance with a unique name.
+func (storage *Storage) NewFile() *File {
+	id := atomic.AddInt64(&storage.idCounter, 1)
+	name := fmt.Sprintf("%s-%d", storage.prefix, id)
+
+	file := NewFile(storage, name)
+	storage.filesMap[name] = file
+
+	return file
+}
+
+// File returns the File with the given name.
+func (storage *Storage) File(name string) (*File, error) {
+	file, ok := storage.filesMap[name]
+	if !ok {
+		return nil, errors.New("file not found")
+	}
+	return file, nil
+}
+
+// Update changes the key identifying a *File to a new key.
+func (storage *Storage) Update(oldname, newname string, file *File) error {
+	f, ok := storage.filesMap[oldname]
+	if !ok {
+		return errors.New("file not found")
+	}
+
+	if f != file {
+		return errors.New("not the same file")
+	}
+
+	delete(storage.filesMap, oldname)
+	storage.filesMap[newname] = file
+	return nil
+}
+
+// NewStorage returns a new Storage instance.
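+// Typical wiring, as exercised by the tests below (sketch):
+//
+//	store := files.NewStorage(fs.NewFileStorage(os.TempDir()))
+//	go store.Run(ctx) // removes all registered files once ctx is canceled
+//	f := store.NewFile()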
+func NewStorage(storage storage.FileStorageInterface) *Storage { + prefix, _ := random.String(8, random.Base62Chars) + + return &Storage{ + FileStorageInterface: storage, + + prefix: prefix, + filesMap: make(map[string]*File), + } +} diff --git a/pkg/storage/files/storage_test.go b/pkg/storage/files/storage_test.go new file mode 100644 index 00000000..43087f95 --- /dev/null +++ b/pkg/storage/files/storage_test.go @@ -0,0 +1,37 @@ +package files + +import ( + "os" + "path/filepath" + "testing" + + "github.com/LumeraProtocol/supernode/pkg/storage/fs" + "github.com/stretchr/testify/assert" +) + +func Test_StoreFileAfterSetFormat(t *testing.T) { + storage := NewStorage(fs.NewFileStorage(os.TempDir())) + + files := []struct { + name string + format Format + }{ + {"test.jpeg", JPEG}, + {"test.jpg", JPEG}, + {"test.png", PNG}, + {"test.webp", WEBP}, + } + + for _, file := range files { + f := storage.NewFile() + assert.NotNil(t, f) + + // + err := f.SetFormatFromExtension(filepath.Ext(file.name)) + assert.Equal(t, nil, err) + assert.Equal(t, file.format, f.format) + + _, err = storage.File(f.Name()) + assert.Equal(t, nil, err) + } +} diff --git a/pkg/storage/files/thumbnail.go b/pkg/storage/files/thumbnail.go new file mode 100644 index 00000000..b747c701 --- /dev/null +++ b/pkg/storage/files/thumbnail.go @@ -0,0 +1,9 @@ +package files + +// ThumbnailCoordinate contains coordinate of region crop by user +type ThumbnailCoordinate struct { + TopLeftX int64 `json:"top_left_x"` + TopLeftY int64 `json:"top_left_y"` + BottomRightX int64 `json:"bottom_right_x"` + BottomRightY int64 `json:"bottom_right_y"` +} diff --git a/pkg/storage/fs/file.go b/pkg/storage/fs/file.go new file mode 100644 index 00000000..fe9597b8 --- /dev/null +++ b/pkg/storage/fs/file.go @@ -0,0 +1,87 @@ +package fs + +import ( + "os" + "path/filepath" + + "github.com/LumeraProtocol/supernode/pkg/errors" + "github.com/LumeraProtocol/supernode/pkg/log" + "github.com/LumeraProtocol/supernode/pkg/storage" +) + +const ( + logPrefix = "storage-fs" +) + +// FS represents file system storage. +type FS struct { + dir string +} + +// Open implements storage.FileStorageInterface.Open +func (fs *FS) Open(filename string) (storage.FileInterface, error) { + filename = filepath.Join(fs.dir, filename) + + if _, err := os.Stat(filename); os.IsNotExist(err) { + return nil, storage.ErrFileNotFound + } + + file, err := os.Open(filename) + if err != nil { + return nil, errors.Errorf("open file %q: %w", filename, err) + } + return file, nil +} + +// Create implements storage.FileStorageInterface.Create +func (fs *FS) Create(filename string) (storage.FileInterface, error) { + filename = filepath.Join(fs.dir, filename) + + if _, err := os.Stat(filename); !os.IsNotExist(err) { + log.WithPrefix(logPrefix).Debugf("Rewrite file %q", filename) + } else { + log.WithPrefix(logPrefix).Debugf("Create file %q", filename) + } + + file, err := os.Create(filename) + if err != nil { + return nil, errors.Errorf("create file %q: %w", filename, err) + } + return file, nil +} + +// Remove implements storage.FileStorageInterface.Remove +func (fs *FS) Remove(filename string) error { + filename = filepath.Join(fs.dir, filename) + + log.WithPrefix(logPrefix).Debugf("Remove file %q", filename) + + if err := os.Remove(filename); err != nil { + return errors.Errorf("remove file %q: %w", filename, err) + } + return nil +} + +// Rename renames oldName to newName. 
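+// Renaming is a no-op when both names are equal; otherwise both are resolved
+// against the storage directory. A caller sketch (fsStore is hypothetical):
+//
+//	if err := fsStore.Rename("draft.png", "final.png"); err != nil {
+//		// handle the error
+//	}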
+func (fs *FS) Rename(oldname, newname string) error { + if oldname == newname { + return nil + } + + oldname = filepath.Join(fs.dir, oldname) + newname = filepath.Join(fs.dir, newname) + + log.WithPrefix(logPrefix).Debugf("Rename file %q to %q", oldname, newname) + + if err := os.Rename(oldname, newname); err != nil { + return errors.Errorf("rename file %q to %q: %w", oldname, newname, err) + } + return nil +} + +// NewFileStorage returns new FS instance. Where `dir` is the path for storing files. +func NewFileStorage(dir string) storage.FileStorageInterface { + return &FS{ + dir: dir, + } +} diff --git a/pkg/storage/fs/file_test.go b/pkg/storage/fs/file_test.go new file mode 100644 index 00000000..955e590f --- /dev/null +++ b/pkg/storage/fs/file_test.go @@ -0,0 +1,168 @@ +package fs + +import ( + "fmt" + "os" + "testing" + + "github.com/LumeraProtocol/supernode/pkg/storage" + + "github.com/stretchr/testify/assert" +) + +func TestFSOpen(t *testing.T) { + t.Parallel() + + type args struct { + filename string + } + + type handleFunc func(dir string, t assert.TestingT) + + testCases := []struct { + args args + createfunc handleFunc + assertion assert.ErrorAssertionFunc + valueAssert assert.ValueAssertionFunc + }{ + { + args: args{"test.txt"}, + assertion: assert.NoError, + valueAssert: assert.NotNil, + createfunc: func(dir string, t assert.TestingT) { + fs := &FS{ + dir: dir, + } + + _, err := fs.Create("test.txt") + assert.NoError(t, err) + }, + }, { + args: args{"non-exit.txt"}, + assertion: assert.Error, + valueAssert: assert.Nil, + createfunc: func(dir string, t assert.TestingT) {}, + }, + } + + for i, testCase := range testCases { + testCase := testCase + + t.Run("group", func(t *testing.T) { + dir, _ := os.MkdirTemp("", "*") + defer os.RemoveAll(dir) + + t.Run(fmt.Sprintf("testCase-%d", i), func(t *testing.T) { + testCase.createfunc(dir, t) + fs := &FS{dir: dir} + + got, err := fs.Open(testCase.args.filename) + testCase.assertion(t, err) + testCase.valueAssert(t, got) + }) + }) + + } +} + +func TestFSCreate(t *testing.T) { + t.Parallel() + + type args struct { + filename string + } + + testCases := []struct { + args args + assertion assert.ErrorAssertionFunc + }{ + { + args: args{"test-1.txt"}, + assertion: assert.NoError, + }, + } + + t.Run("group", func(t *testing.T) { + dir, _ := os.MkdirTemp("", "*") + defer os.RemoveAll(dir) + + for i, testCase := range testCases { + testCase := testCase + + t.Run(fmt.Sprintf("testCase-%d", i), func(t *testing.T) { + fs := &FS{ + dir: dir, + } + got, err := fs.Create(testCase.args.filename) + testCase.assertion(t, err) + assert.NotNil(t, got) + assert.FileExists(t, fmt.Sprintf("%s/%s", dir, testCase.args.filename)) + }) + } + }) +} + +func TestFSRemove(t *testing.T) { + t.Parallel() + + type args struct { + filename string + } + + testCases := []struct { + args args + assertion assert.ErrorAssertionFunc + }{ + { + args: args{"test-2.txt"}, + assertion: assert.NoError, + }, + } + + t.Run("group", func(t *testing.T) { + dir, _ := os.MkdirTemp("", "*") + defer os.RemoveAll(dir) + + for i, testCase := range testCases { + testCase := testCase + + t.Run(fmt.Sprintf("testCase-%d", i), func(t *testing.T) { + fs := &FS{ + dir: dir, + } + + _, err := fs.Create(testCase.args.filename) + assert.NoError(t, err) + + testCase.assertion(t, fs.Remove(testCase.args.filename)) + }) + } + + }) +} + +func TestNewFileStorage(t *testing.T) { + t.Parallel() + + type args struct { + dir string + } + + testCases := []struct { + args args + want storage.FileStorageInterface 
+ }{ + { + args: args{"./"}, + want: &FS{dir: "./"}, + }, + } + + for i, testCase := range testCases { + testCase := testCase + + t.Run(fmt.Sprintf("testCase-%d", i), func(t *testing.T) { + assert.Equal(t, testCase.want, NewFileStorage(testCase.args.dir)) + }) + } +} diff --git a/pkg/storage/queries/health_check.go b/pkg/storage/queries/health_check.go new file mode 100644 index 00000000..c47db8cf --- /dev/null +++ b/pkg/storage/queries/health_check.go @@ -0,0 +1,430 @@ +package queries + +import ( + "context" + "fmt" + "time" + + "github.com/LumeraProtocol/supernode/pkg/log" + "github.com/LumeraProtocol/supernode/pkg/types" + "github.com/LumeraProtocol/supernode/pkg/utils/metrics" + json "github.com/json-iterator/go" +) + +type HealthCheckChallengeQueries interface { + InsertHealthCheckChallengeMessage(challenge types.HealthCheckChallengeLogMessage) error + InsertBroadcastHealthCheckMessage(challenge types.BroadcastHealthCheckLogMessage) error + QueryHCChallengeMessage(challengeID string, messageType int) (challengeMessage types.HealthCheckChallengeLogMessage, err error) + GetHealthCheckChallengeMetricsByChallengeID(challengeID string) ([]types.HealthCheckChallengeLogMessage, error) + + GetHCMetricsByChallengeIDAndMessageType(challengeID string, messageType types.HealthCheckMessageType) ([]types.HealthCheckChallengeLogMessage, error) + BatchInsertHCMetrics(metrics []types.HealthCheckChallengeLogMessage) error + HealthCheckChallengeMetrics(timestamp time.Time) ([]types.HealthCheckChallengeLogMessage, error) + InsertHealthCheckChallengeMetric(metric types.HealthCheckChallengeMetric) error + GetHCSummaryStats(from time.Time) (hcMetrics metrics.HCMetrics, err error) + GetTotalHCGeneratedAndProcessedAndEvaluated(from time.Time) (metrics.HCMetrics, error) + GetMetricsDataByHealthCheckChallengeID(ctx context.Context, challengeID string) ([]types.HealthCheckMessage, error) + GetLastNHCMetrics() ([]types.NHcMetric, error) + + GetDistinctHCChallengeIDsCountForScoreAggregation(after, before time.Time) (int, error) + GetDistinctHCChallengeIDs(after, before time.Time, batchNumber int) ([]string, error) +} + +// GetTotalHCGeneratedAndProcessedAndEvaluated retrieves the total health-check challenges generated/processed/evaluated +func (s *SQLiteStore) GetTotalHCGeneratedAndProcessedAndEvaluated(from time.Time) (metrics.HCMetrics, error) { + metrics := metrics.HCMetrics{} + + // Query for total number of challenges + totalChallengeQuery := "SELECT COUNT(DISTINCT challenge_id) FROM healthcheck_challenge_metrics WHERE message_type = 1 AND created_at > ?" + err := s.db.QueryRow(totalChallengeQuery, from).Scan(&metrics.TotalChallenges) + if err != nil { + return metrics, err + } + + // Query for total challenges responded + totalChallengesProcessedQuery := "SELECT COUNT(DISTINCT challenge_id) FROM healthcheck_challenge_metrics WHERE message_type = 2 AND created_at > ?" + err = s.db.QueryRow(totalChallengesProcessedQuery, from).Scan(&metrics.TotalChallengesProcessed) + if err != nil { + return metrics, err + } + + totalChallengesEvaluatedQuery := "SELECT COUNT(DISTINCT challenge_id) FROM healthcheck_challenge_metrics WHERE message_type = 3 AND created_at > ?" 
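+	// message_type values used throughout this file (inferred from the
+	// queries and metric names): 1 = challenge generated, 2 = challenge
+	// processed/responded, 3 = evaluated by the challenger, 4 = observer
+	// evaluation.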
+	err = s.db.QueryRow(totalChallengesEvaluatedQuery, from).Scan(&metrics.TotalChallengesEvaluatedByChallenger)
+	if err != nil {
+		return metrics, err
+	}
+
+	return metrics, nil
+}
+
+// GetHCObserversEvaluations retrieves the observers' evaluations
+func (s *SQLiteStore) GetHCObserversEvaluations(from time.Time) ([]types.HealthCheckChallengeLogMessage, error) {
+	var messages []types.HealthCheckChallengeLogMessage
+
+	query := "SELECT id, challenge_id, message_type, data, sender_id, created_at, updated_at FROM healthcheck_challenge_metrics WHERE message_type = 4 and created_at > ?"
+	rows, err := s.db.Query(query, from)
+	if err != nil {
+		return nil, err
+	}
+	defer rows.Close()
+
+	for rows.Next() {
+		var msg types.HealthCheckChallengeLogMessage
+		err := rows.Scan(&msg.ID, &msg.ChallengeID, &msg.MessageType, &msg.Data, &msg.Sender, &msg.CreatedAt, &msg.UpdatedAt)
+		if err != nil {
+			return nil, err
+		}
+		messages = append(messages, msg)
+	}
+	if err := rows.Err(); err != nil {
+		return nil, err
+	}
+
+	return messages, nil
+}
+
+// GetHCSummaryStats gets health-check summary stats
+func (s *SQLiteStore) GetHCSummaryStats(from time.Time) (hcMetrics metrics.HCMetrics, err error) {
+	hcMetrics, err = s.GetTotalHCGeneratedAndProcessedAndEvaluated(from)
+	if err != nil {
+		return hcMetrics, err
+	}
+
+	hcObserversEvaluations, err := s.GetHCObserversEvaluations(from)
+	if err != nil {
+		return hcMetrics, err
+	}
+	log.WithField("observer_evaluations", len(hcObserversEvaluations)).Info("observer evaluations retrieved")
+
+	observerEvaluationMetrics := processHCObserverEvaluations(hcObserversEvaluations)
+	log.WithField("observer_evaluation_metrics", len(observerEvaluationMetrics)).Info("observer evaluation metrics retrieved")
+
+	for _, obMetrics := range observerEvaluationMetrics {
+		// a challenge counts as verified once at least minVerifications observers affirm it
+		if obMetrics.ChallengesVerified >= minVerifications {
+			hcMetrics.TotalChallengesVerified++
+		} else {
+			if obMetrics.FailedByInvalidTimestamps > 0 {
+				hcMetrics.SlowResponsesObservedByObservers++
+			}
+			if obMetrics.FailedByInvalidSignatures > 0 {
+				hcMetrics.InvalidSignaturesObservedByObservers++
+			}
+			if obMetrics.FailedByInvalidEvaluation > 0 {
+				hcMetrics.InvalidEvaluationObservedByObservers++
+			}
+		}
+	}
+
+	return hcMetrics, nil
+}
+
+// GetHealthCheckChallengeMetricsByChallengeID retrieves all health-check challenge metrics for the given challenge ID
+func (s *SQLiteStore) GetHealthCheckChallengeMetricsByChallengeID(challengeID string) ([]types.HealthCheckChallengeLogMessage, error) {
+	const query = `
+	SELECT id, challenge_id, message_type, data, sender_id, created_at, updated_at
+	FROM healthcheck_challenge_metrics
+	WHERE challenge_id = ?;`
+
+	rows, err := s.db.Query(query, challengeID)
+	if err != nil {
+		return nil, err
+	}
+	defer rows.Close()
+
+	var metrics []types.HealthCheckChallengeLogMessage
+	for rows.Next() {
+		var m types.HealthCheckChallengeLogMessage
+		err := rows.Scan(&m.ID, &m.ChallengeID, &m.MessageType, &m.Data, &m.Sender, &m.CreatedAt, &m.UpdatedAt)
+		if err != nil {
+			return nil, err
+		}
+		metrics = append(metrics, m)
+	}
+
+	return metrics, rows.Err()
+}
+
+// GetMetricsDataByHealthCheckChallengeID retrieves the metrics data for the given health-check challenge ID
+func (s *SQLiteStore) GetMetricsDataByHealthCheckChallengeID(ctx context.Context, challengeID string) (healthCheckChallengeMessages []types.HealthCheckMessage, err error) {
+	hcMetrics, err := s.GetHealthCheckChallengeMetricsByChallengeID(challengeID)
+	if err != nil {
+		return healthCheckChallengeMessages, err
+	}
+	log.WithContext(ctx).WithField("rows", len(hcMetrics)).Info("health-check-challenge metrics row count")
+
+	for _, hcMetric := range hcMetrics {
+		msg := types.HealthCheckMessageData{}
+		if err := json.Unmarshal(hcMetric.Data, &msg); err != nil {
+			return healthCheckChallengeMessages, fmt.Errorf("cannot unmarshal health check challenge data: %w", err)
+		}
+
+		healthCheckChallengeMessages = append(healthCheckChallengeMessages, types.HealthCheckMessage{
+			ChallengeID:     hcMetric.ChallengeID,
+			MessageType:     types.HealthCheckMessageType(hcMetric.MessageType),
+			Sender:          hcMetric.Sender,
+			SenderSignature: hcMetric.SenderSignature,
+			Data:            msg,
+		})
+	}
+
+	return healthCheckChallengeMessages, nil
+}
+
+// InsertHealthCheckChallengeMessage inserts a health-check challenge message into the DB
+func (s *SQLiteStore) InsertHealthCheckChallengeMessage(challenge types.HealthCheckChallengeLogMessage) error {
+	now := time.Now().UTC()
+	const insertQuery = "INSERT INTO healthcheck_challenge_messages(id, challenge_id, message_type, data, sender_id, sender_signature, created_at, updated_at) VALUES(NULL,?,?,?,?,?,?,?);"
+	_, err := s.db.Exec(insertQuery, challenge.ChallengeID, challenge.MessageType, challenge.Data, challenge.Sender, challenge.SenderSignature, now, now)
+	if err != nil {
+		return err
+	}
+
+	return nil
+}
+
+// InsertHealthCheckChallengeMetric inserts a health-check challenge metric
+func (s *SQLiteStore) InsertHealthCheckChallengeMetric(m types.HealthCheckChallengeMetric) error {
+	now := time.Now().UTC()
+
+	const metricsQuery = "INSERT INTO healthcheck_challenge_metrics(id, challenge_id, message_type, data, sender_id, created_at, updated_at) VALUES(NULL,?,?,?,?,?,?) ON CONFLICT DO NOTHING;"
+	_, err := s.db.Exec(metricsQuery, m.ChallengeID, m.MessageType, m.Data, m.SenderID, now, now)
+	if err != nil {
+		return err
+	}
+
+	return nil
+}
+
+// BatchInsertHCMetrics inserts health-check challenge metrics in a batch
+func (s *SQLiteStore) BatchInsertHCMetrics(metrics []types.HealthCheckChallengeLogMessage) error {
+	tx, err := s.db.Begin()
+	if err != nil {
+		return err
+	}
+
+	stmt, err := tx.Prepare(`
+	INSERT OR IGNORE INTO healthcheck_challenge_metrics
+	(id, challenge_id, message_type, data, sender_id, created_at, updated_at)
+	VALUES (NULL,?,?,?,?,?,?)
+	`)
+	if err != nil {
+		tx.Rollback()
+		return err
+	}
+	defer stmt.Close()
+
+	for _, metric := range metrics {
+		now := time.Now().UTC()
+
+		_, err = stmt.Exec(metric.ChallengeID, metric.MessageType, metric.Data, metric.Sender, now, now)
+		if err != nil {
+			tx.Rollback()
+			return err
+		}
+	}
+
+	// Commit the transaction
+	return tx.Commit()
+}
+
+// HealthCheckChallengeMetrics retrieves all the metrics that need to be broadcast
+func (s *SQLiteStore) HealthCheckChallengeMetrics(timestamp time.Time) ([]types.HealthCheckChallengeLogMessage, error) {
+	const query = `
+	SELECT id, challenge_id, message_type, data, sender_id, created_at, updated_at
+	FROM healthcheck_challenge_metrics
+	WHERE created_at > ?
+	`
+
+	rows, err := s.db.Query(query, timestamp)
+	if err != nil {
+		return nil, err
+	}
+	defer rows.Close()
+
+	var metrics []types.HealthCheckChallengeLogMessage
+	for rows.Next() {
+		var m types.HealthCheckChallengeLogMessage
+		err := rows.Scan(&m.ID, &m.ChallengeID, &m.MessageType, &m.Data, &m.Sender, &m.CreatedAt, &m.UpdatedAt)
+		if err != nil {
+			return nil, err
+		}
+		metrics = append(metrics, m)
+	}
+
+	return metrics, rows.Err()
+}
+
+// InsertBroadcastHealthCheckMessage inserts a broadcast health-check challenge message into the DB
+func (s *SQLiteStore) InsertBroadcastHealthCheckMessage(challenge types.BroadcastHealthCheckLogMessage) error {
+	now := time.Now().UTC()
+	const insertQuery = "INSERT INTO broadcast_healthcheck_challenge_messages(id, challenge_id, data, challenger, recipient, observers, created_at, updated_at) VALUES(NULL,?,?,?,?,?,?,?);"
+	_, err := s.db.Exec(insertQuery, challenge.ChallengeID, challenge.Data, challenge.Challenger, challenge.Recipient, challenge.Observers, now, now)
+	if err != nil {
+		return err
+	}
+
+	return nil
+}
+
+// QueryHCChallengeMessage retrieves the health-check challenge message for the given challengeID and messageType
+func (s *SQLiteStore) QueryHCChallengeMessage(challengeID string, messageType int) (challengeMessage types.HealthCheckChallengeLogMessage, err error) {
+	// NOTE: SELECT * relies on the column order of healthcheck_challenge_messages; keep the Scan below in sync with the schema
+	const selectQuery = "SELECT * FROM healthcheck_challenge_messages WHERE challenge_id=? AND message_type=?"
+	err = s.db.QueryRow(selectQuery, challengeID, messageType).Scan(
+		&challengeMessage.ID, &challengeMessage.ChallengeID, &challengeMessage.MessageType, &challengeMessage.Data,
+		&challengeMessage.Sender, &challengeMessage.SenderSignature, &challengeMessage.CreatedAt, &challengeMessage.UpdatedAt)
+	if err != nil {
+		return challengeMessage, err
+	}
+
+	return challengeMessage, nil
+}
+
+// GetHCMetricsByChallengeIDAndMessageType retrieves all the metrics by challengeID and messageType
+func (s *SQLiteStore) GetHCMetricsByChallengeIDAndMessageType(challengeID string, messageType types.HealthCheckMessageType) ([]types.HealthCheckChallengeLogMessage, error) {
+	const query = `
+	SELECT id, challenge_id, message_type, data, sender_id, created_at, updated_at
+	FROM healthcheck_challenge_metrics
+	WHERE challenge_id = ?
+	AND message_type = ?;`
+
+	rows, err := s.db.Query(query, challengeID, int(messageType))
+	if err != nil {
+		return nil, err
+	}
+	defer rows.Close()
+
+	var metrics []types.HealthCheckChallengeLogMessage
+	for rows.Next() {
+		var m types.HealthCheckChallengeLogMessage
+		err := rows.Scan(&m.ID, &m.ChallengeID, &m.MessageType, &m.Data, &m.Sender, &m.CreatedAt, &m.UpdatedAt)
+		if err != nil {
+			return nil, err
+		}
+		metrics = append(metrics, m)
+	}
+
+	return metrics, rows.Err()
+}
+
+func processHCObserverEvaluations(observersEvaluations []types.HealthCheckChallengeLogMessage) map[string]HCObserverEvaluationMetrics {
+	evaluationMap := make(map[string]HCObserverEvaluationMetrics)
+
+	for _, observerEvaluation := range observersEvaluations {
+		var oe types.HealthCheckMessageData
+		if err := json.Unmarshal(observerEvaluation.Data, &oe); err != nil {
+			continue
+		}
+
+		oem, exists := evaluationMap[observerEvaluation.ChallengeID]
+		if !exists {
+			oem = HCObserverEvaluationMetrics{} // initialize on first evaluation for this challenge
+		}
+
+		if isHCObserverEvaluationVerified(oe.ObserverEvaluation) {
+			oem.ChallengesVerified++
+		} else {
+			if !oe.ObserverEvaluation.IsChallengeTimestampOK ||
+				!oe.ObserverEvaluation.IsProcessTimestampOK ||
+				!oe.ObserverEvaluation.IsEvaluationTimestampOK {
+				oem.FailedByInvalidTimestamps++
+			}
+
+			if !oe.ObserverEvaluation.IsChallengerSignatureOK ||
+				!oe.ObserverEvaluation.IsRecipientSignatureOK {
+				oem.FailedByInvalidSignatures++
+			}
+
+			if !oe.ObserverEvaluation.IsEvaluationResultOK {
+				oem.FailedByInvalidEvaluation++
+			}
+		}
+
+		evaluationMap[observerEvaluation.ChallengeID] = oem
+	}
+
+	return evaluationMap
+}
+
+// isHCObserverEvaluationVerified reports whether every check in the observer evaluation passed
+func isHCObserverEvaluationVerified(observerEvaluation types.HealthCheckObserverEvaluationData) bool {
+	return observerEvaluation.IsEvaluationResultOK &&
+		observerEvaluation.IsChallengerSignatureOK &&
+		observerEvaluation.IsRecipientSignatureOK &&
+		observerEvaluation.IsChallengeTimestampOK &&
+		observerEvaluation.IsProcessTimestampOK &&
+		observerEvaluation.IsEvaluationTimestampOK
+}
+
+// GetDistinctHCChallengeIDsCountForScoreAggregation gets the count of distinct challenge ids for score aggregation
+func (s *SQLiteStore) GetDistinctHCChallengeIDsCountForScoreAggregation(after, before time.Time) (int, error) {
+	query := `
+	SELECT COUNT(DISTINCT challenge_id)
+	FROM healthcheck_challenge_metrics
+	WHERE message_type = 4 AND created_at >= ? AND created_at < ?
+	`
+
+	var challengeIDsCount int
+	err := s.db.QueryRow(query, after, before).Scan(&challengeIDsCount)
+	if err != nil {
+		return 0, err
+	}
+
+	return challengeIDsCount, nil
+}
+
+// GetDistinctHCChallengeIDs retrieves the distinct challenge ids for score aggregation
+func (s *SQLiteStore) GetDistinctHCChallengeIDs(after, before time.Time, batchNumber int) ([]string, error) {
+	offset := batchNumber * batchSizeForChallengeIDsRetrieval
+
+	// ORDER BY makes LIMIT/OFFSET pagination deterministic across batches
+	query := `
+	SELECT DISTINCT challenge_id
+	FROM healthcheck_challenge_metrics
+	WHERE message_type = 4 AND created_at >= ? AND created_at < ?
+	ORDER BY challenge_id
+	LIMIT ? OFFSET ?
+	`
+
+	rows, err := s.db.Query(query, after, before, batchSizeForChallengeIDsRetrieval, offset)
+	if err != nil {
+		return nil, err
+	}
+	defer rows.Close()
+
+	var challengeIDs []string
+	for rows.Next() {
+		var challengeID string
+		if err := rows.Scan(&challengeID); err != nil {
+			return nil, err
+		}
+		challengeIDs = append(challengeIDs, challengeID)
+	}
+
+	if err = rows.Err(); err != nil {
+		return nil, err
+	}
+
+	return challengeIDs, nil
+}
diff --git a/pkg/storage/queries/local.go b/pkg/storage/queries/local.go
new file mode 100644
index 00000000..e677de76
--- /dev/null
+++ b/pkg/storage/queries/local.go
@@ -0,0 +1,16 @@
+package queries
+
+import (
+	"context"
+)
+
+// LocalStoreInterface is the interface for the queries SQLite store
+type LocalStoreInterface interface {
+	CloseHistoryDB(ctx context.Context)
+
+	TaskHistoryQueries
+	SelfHealingQueries
+	StorageChallengeQueries
+	PingHistoryQueries
+	HealthCheckChallengeQueries
+}
diff --git a/pkg/storage/queries/ping_history.go b/pkg/storage/queries/ping_history.go
new file mode 100644
index 00000000..84bfc6e0
--- /dev/null
+++ b/pkg/storage/queries/ping_history.go
@@ -0,0 +1,294 @@
+package queries
+
+import (
+	"time"
+
+	"github.com/LumeraProtocol/supernode/pkg/types"
+)
+
+// PingHistoryQueries is the set of ping-history operations exposed by the store
+type PingHistoryQueries interface {
+	UpsertPingHistory(pingInfo types.PingInfo) error
+	GetPingInfoBySupernodeID(supernodeID string) (*types.PingInfo, error)
+	GetAllPingInfos() (types.PingInfos, error)
+	GetWatchlistPingInfo() ([]types.PingInfo, error)
+	GetAllPingInfoForOnlineNodes() (types.PingInfos, error)
+	UpdatePingInfo(supernodeID string, isOnWatchlist, isAdjusted bool) error
+
+	UpdateSCMetricsBroadcastTimestamp(nodeID string, broadcastAt time.Time) error
+	UpdateMetricsBroadcastTimestamp(nodeID string) error
+	UpdateGenerationMetricsBroadcastTimestamp(nodeID string) error
+	UpdateExecutionMetricsBroadcastTimestamp(nodeID string) error
+	UpdateHCMetricsBroadcastTimestamp(nodeID string, broadcastAt time.Time) error
+}
+
+// UpsertPingHistory inserts or updates ping information in the ping_history table
+func (s *SQLiteStore) UpsertPingHistory(pingInfo types.PingInfo) error {
+	now := time.Now().UTC()
+
+	const upsertQuery = `
+	INSERT INTO ping_history (
+		supernode_id, ip_address, total_pings, total_successful_pings,
+		avg_ping_response_time, is_online, is_on_watchlist, is_adjusted, last_seen, cumulative_response_time,
+		created_at, updated_at
+	) VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?)
+	ON CONFLICT(supernode_id)
+	DO UPDATE SET
+		total_pings = excluded.total_pings,
+		total_successful_pings = excluded.total_successful_pings,
+		avg_ping_response_time = excluded.avg_ping_response_time,
+		is_online = excluded.is_online,
+		is_on_watchlist = excluded.is_on_watchlist,
+		is_adjusted = excluded.is_adjusted,
+		last_seen = excluded.last_seen,
+		cumulative_response_time = excluded.cumulative_response_time,
+		updated_at = excluded.updated_at;`
+
+	_, err := s.db.Exec(upsertQuery,
+		pingInfo.SupernodeID, pingInfo.IPAddress, pingInfo.TotalPings,
+		pingInfo.TotalSuccessfulPings, pingInfo.AvgPingResponseTime,
+		pingInfo.IsOnline, pingInfo.IsOnWatchlist, pingInfo.IsAdjusted, pingInfo.LastSeen.Time, pingInfo.CumulativeResponseTime, now, now)
+	if err != nil {
+		return err
+	}
+
+	return nil
+}
+
+// GetPingInfoBySupernodeID retrieves a ping history record by supernode ID
+func (s *SQLiteStore) GetPingInfoBySupernodeID(supernodeID string) (*types.PingInfo, error) {
+	const selectQuery = `
+	SELECT id, supernode_id, ip_address, total_pings, total_successful_pings,
+		avg_ping_response_time, is_online, is_on_watchlist, is_adjusted, last_seen, cumulative_response_time,
+		created_at, updated_at
+	FROM ping_history
+	WHERE supernode_id = ?;`
+
+	var pingInfo types.PingInfo
+	row := s.db.QueryRow(selectQuery, supernodeID)
+
+	// Scan the row into the PingInfo struct
+	err := row.Scan(
+		&pingInfo.ID, &pingInfo.SupernodeID, &pingInfo.IPAddress, &pingInfo.TotalPings,
+		&pingInfo.TotalSuccessfulPings, &pingInfo.AvgPingResponseTime,
+		&pingInfo.IsOnline, &pingInfo.IsOnWatchlist, &pingInfo.IsAdjusted, &pingInfo.LastSeen, &pingInfo.CumulativeResponseTime,
+		&pingInfo.CreatedAt, &pingInfo.UpdatedAt,
+	)
+	if err != nil {
+		return nil, err
+	}
+
+	return &pingInfo, nil
+}
+
+// GetWatchlistPingInfo retrieves all the nodes that are on the watchlist and not yet adjusted
+func (s *SQLiteStore) GetWatchlistPingInfo() ([]types.PingInfo, error) {
+	const selectQuery = `
+	SELECT id, supernode_id, ip_address, total_pings, total_successful_pings,
+		avg_ping_response_time, is_online, is_on_watchlist, is_adjusted, last_seen, cumulative_response_time,
+		created_at, updated_at
+	FROM ping_history
+	WHERE is_on_watchlist = true AND is_adjusted = false;`
+
+	rows, err := s.db.Query(selectQuery)
+	if err != nil {
+		return nil, err
+	}
+	defer rows.Close()
+
+	var pingInfos types.PingInfos
+	for rows.Next() {
+		var pingInfo types.PingInfo
+		if err := rows.Scan(
+			&pingInfo.ID, &pingInfo.SupernodeID, &pingInfo.IPAddress, &pingInfo.TotalPings,
+			&pingInfo.TotalSuccessfulPings, &pingInfo.AvgPingResponseTime,
+			&pingInfo.IsOnline, &pingInfo.IsOnWatchlist, &pingInfo.IsAdjusted, &pingInfo.LastSeen, &pingInfo.CumulativeResponseTime,
+			&pingInfo.CreatedAt, &pingInfo.UpdatedAt,
+		); err != nil {
+			return nil, err
+		}
+		pingInfos = append(pingInfos, pingInfo)
+	}
+
+	if err = rows.Err(); err != nil {
+		return nil, err
+	}
+
+	return pingInfos, nil
+}
+
+// UpdatePingInfo updates the watchlist and adjusted flags for the given supernode
+func (s *SQLiteStore) UpdatePingInfo(supernodeID string, isOnWatchlist, isAdjusted bool) error {
+	// Update query
+	const updateQuery = `
+UPDATE ping_history
+SET is_adjusted = ?, is_on_watchlist = ?
+WHERE supernode_id = ?;`
+
+	// Execute the update query
+	_, err := s.db.Exec(updateQuery, isAdjusted, isOnWatchlist, supernodeID)
+	if err != nil {
+		return err
+	}
+
+	return nil
+}
+
+// UpdateMetricsBroadcastTimestamp updates the ping info metrics_last_broadcast_at
+func (s *SQLiteStore) UpdateMetricsBroadcastTimestamp(nodeID string) error {
+	// Update query
+	const updateQuery = `
+UPDATE ping_history
+SET metrics_last_broadcast_at = ?
+WHERE supernode_id = ?;`
+
+	// Execute the update query
+	_, err := s.db.Exec(updateQuery, time.Now().UTC(), nodeID)
+	if err != nil {
+		return err
+	}
+
+	return nil
+}
+
+// UpdateGenerationMetricsBroadcastTimestamp sets the ping info generation_metrics_last_broadcast_at, backdated by 180 minutes
+func (s *SQLiteStore) UpdateGenerationMetricsBroadcastTimestamp(nodeID string) error {
+	// Update query
+	const updateQuery = `
+UPDATE ping_history
+SET generation_metrics_last_broadcast_at = ?
+WHERE supernode_id = ?;`
+
+	// Execute the update query; the timestamp is written 180 minutes in the past
+	_, err := s.db.Exec(updateQuery, time.Now().Add(-180*time.Minute).UTC(), nodeID)
+	if err != nil {
+		return err
+	}
+
+	return nil
+}
+
+// UpdateExecutionMetricsBroadcastTimestamp sets the ping info execution_metrics_last_broadcast_at, backdated by 180 minutes
+func (s *SQLiteStore) UpdateExecutionMetricsBroadcastTimestamp(nodeID string) error {
+	// Update query
+	const updateQuery = `
+UPDATE ping_history
+SET execution_metrics_last_broadcast_at = ?
+WHERE supernode_id = ?;`
+
+	// Execute the update query; the timestamp is written 180 minutes in the past
+	_, err := s.db.Exec(updateQuery, time.Now().Add(-180*time.Minute).UTC(), nodeID)
+	if err != nil {
+		return err
+	}
+
+	return nil
+}
+
+// UpdateSCMetricsBroadcastTimestamp records the storage-challenge metrics last broadcast timestamp
+func (s *SQLiteStore) UpdateSCMetricsBroadcastTimestamp(nodeID string, updatedAt time.Time) error {
+	// Update query
+	const updateQuery = `
+UPDATE ping_history
+SET metrics_last_broadcast_at = ?
+WHERE supernode_id = ?;`
+
+	// Execute the update query with the supplied broadcast timestamp
+	_, err := s.db.Exec(updateQuery, updatedAt, nodeID)
+	if err != nil {
+		return err
+	}
+
+	return nil
+}
+
+// GetAllPingInfos retrieves all ping infos
+func (s *SQLiteStore) GetAllPingInfos() (types.PingInfos, error) {
+	const selectQuery = `
+	SELECT id, supernode_id, ip_address, total_pings, total_successful_pings,
+		avg_ping_response_time, is_online, is_on_watchlist, is_adjusted, last_seen, cumulative_response_time,
+		created_at, updated_at
+	FROM ping_history
+	`
+	rows, err := s.db.Query(selectQuery)
+	if err != nil {
+		return nil, err
+	}
+	defer rows.Close()
+
+	var pingInfos types.PingInfos
+	for rows.Next() {
+		var pingInfo types.PingInfo
+		if err := rows.Scan(
+			&pingInfo.ID, &pingInfo.SupernodeID, &pingInfo.IPAddress, &pingInfo.TotalPings,
+			&pingInfo.TotalSuccessfulPings, &pingInfo.AvgPingResponseTime,
+			&pingInfo.IsOnline, &pingInfo.IsOnWatchlist, &pingInfo.IsAdjusted, &pingInfo.LastSeen, &pingInfo.CumulativeResponseTime,
+			&pingInfo.CreatedAt, &pingInfo.UpdatedAt,
+		); err != nil {
+			return nil, err
+		}
+		pingInfos = append(pingInfos, pingInfo)
+	}
+
+	if err = rows.Err(); err != nil {
+		return nil, err
+	}
+
+	return pingInfos, nil
+}
+
+// GetAllPingInfoForOnlineNodes retrieves all ping infos for nodes that are online
+func (s *SQLiteStore) GetAllPingInfoForOnlineNodes() (types.PingInfos, error) {
+	const selectQuery = `
+	SELECT id, supernode_id, ip_address, total_pings, total_successful_pings,
+		avg_ping_response_time, is_online, is_on_watchlist, is_adjusted, last_seen, cumulative_response_time,
+		metrics_last_broadcast_at, generation_metrics_last_broadcast_at, execution_metrics_last_broadcast_at,
+		created_at, updated_at
+	FROM ping_history
+	WHERE is_online = true`
+	rows, err := s.db.Query(selectQuery)
+	if err != nil {
+		return nil, err
+	}
+	defer rows.Close()
+
+	var pingInfos types.PingInfos
+	for rows.Next() {
+		var pingInfo types.PingInfo
+		if err := rows.Scan(
+			&pingInfo.ID, &pingInfo.SupernodeID, &pingInfo.IPAddress, &pingInfo.TotalPings,
+			&pingInfo.TotalSuccessfulPings, &pingInfo.AvgPingResponseTime,
+			&pingInfo.IsOnline, &pingInfo.IsOnWatchlist, &pingInfo.IsAdjusted, &pingInfo.LastSeen, &pingInfo.CumulativeResponseTime,
+			&pingInfo.MetricsLastBroadcastAt, &pingInfo.GenerationMetricsLastBroadcastAt, &pingInfo.ExecutionMetricsLastBroadcastAt,
+			&pingInfo.CreatedAt, &pingInfo.UpdatedAt,
+		); err != nil {
+			return nil, err
+		}
+		pingInfos = append(pingInfos, pingInfo)
+	}
+
+	if err = rows.Err(); err != nil {
+		return nil, err
+	}
+
+	return pingInfos, nil
+}
+
+// UpdateHCMetricsBroadcastTimestamp records the health-check metrics last broadcast timestamp
+func (s *SQLiteStore) UpdateHCMetricsBroadcastTimestamp(nodeID string, updatedAt time.Time) error {
+	// Update query
+	const updateQuery = `
+UPDATE ping_history
+SET health_check_metrics_last_broadcast_at = ?
+WHERE supernode_id = ?;`
+
+	// Execute the update query with the supplied broadcast timestamp
+	_, err := s.db.Exec(updateQuery, updatedAt, nodeID)
+	if err != nil {
+		return err
+	}
+
+	return nil
+}
diff --git a/pkg/storage/queries/self_healing.go b/pkg/storage/queries/self_healing.go
new file mode 100644
index 00000000..5a4731f3
--- /dev/null
+++ b/pkg/storage/queries/self_healing.go
@@ -0,0 +1,644 @@
+package queries
+
+import (
+	"context"
+	"fmt"
+	"time"
+
+	"github.com/LumeraProtocol/supernode/pkg/log"
+	"github.com/LumeraProtocol/supernode/pkg/types"
+	"github.com/LumeraProtocol/supernode/pkg/utils/metrics"
+	json "github.com/json-iterator/go"
+)
+
+// SelfHealingQueries is the set of self-healing operations exposed by the store
+type SelfHealingQueries interface {
+	BatchInsertSelfHealingChallengeEvents(ctx context.Context, event []types.SelfHealingChallengeEvent) error
+	UpdateSHChallengeEventProcessed(challengeID string, isProcessed bool) error
+	GetSelfHealingChallengeEvents() ([]types.SelfHealingChallengeEvent, error)
+	CleanupSelfHealingChallenges() (err error)
+	QuerySelfHealingChallenges() (challenges []types.SelfHealingChallenge, err error)
+
+	QueryMetrics(ctx context.Context, from time.Time, to *time.Time) (m metrics.Metrics, err error)
+	InsertSelfHealingGenerationMetrics(metrics types.SelfHealingGenerationMetric) error
+	InsertSelfHealingExecutionMetrics(metrics types.SelfHealingExecutionMetric) error
+	BatchInsertExecutionMetrics(metrics []types.SelfHealingExecutionMetric) error
+	GetSelfHealingGenerationMetrics(timestamp time.Time) ([]types.SelfHealingGenerationMetric, error)
+	GetSelfHealingExecutionMetrics(timestamp time.Time) ([]types.SelfHealingExecutionMetric, error)
+	GetLastNSHChallenges(ctx context.Context, n int) (types.SelfHealingReports, error)
+	GetSHChallengeReport(ctx context.Context, challengeID string) (types.SelfHealingReports, error)
+	GetSHExecutionMetrics(ctx context.Context, from time.Time) (metrics.SHExecutionMetrics, error)
+}
+
+var (
+	// oneYearAgo is fixed at package initialization; it is not recomputed per call
+	oneYearAgo = time.Now().AddDate(-1, 0, 0)
+)
+
+// SHChallengeMetric represents the self-healing challenge metric
+type SHChallengeMetric struct {
+	ChallengeID string
+
+	// healer node
+	IsAck      bool
+	IsAccepted bool
+	IsRejected bool
+
+	// verifier nodes
+	HasMinVerifications                    bool
+	IsVerified                             bool
+	IsReconstructionRequiredVerified       bool
+	IsReconstructionNotRequiredVerified    bool
+	IsUnverified                           bool
+	IsReconstructionRequiredNotVerified    bool
+	IsReconstructionNotRequiredNotVerified bool
+	IsReconstructionRequiredHashMismatch   bool
+
+	IsHealed bool
+}
+
+// HCObserverEvaluationMetrics aggregates per-challenge observer-evaluation outcomes for health-check challenges
+type HCObserverEvaluationMetrics struct {
+	ChallengesVerified        int
+	FailedByInvalidTimestamps int
+	FailedByInvalidSignatures int
+	FailedByInvalidEvaluation int
+}
+
+// ObserverEvaluationMetrics aggregates per-challenge observer-evaluation outcomes for storage challenges
+type ObserverEvaluationMetrics struct {
+	ChallengesVerified        int
+	FailedByInvalidTimestamps int
+	FailedByInvalidSignatures int
+	FailedByInvalidEvaluation int
+}
+
+// InsertSelfHealingGenerationMetrics inserts self-healing generation metrics
+func (s *SQLiteStore) InsertSelfHealingGenerationMetrics(metrics types.SelfHealingGenerationMetric) error {
+	now := time.Now().UTC()
+	const insertQuery = "INSERT INTO self_healing_generation_metrics(id, trigger_id, message_type, data, sender_id, sender_signature, created_at, updated_at) VALUES(NULL,?,?,?,?,?,?,?) ON CONFLICT DO NOTHING;"
+	_, err := s.db.Exec(insertQuery, metrics.TriggerID, metrics.MessageType, metrics.Data, metrics.SenderID, metrics.SenderSignature, now, now)
+	if err != nil {
+		return err
+	}
+
+	return nil
+}
+
+// InsertSelfHealingExecutionMetrics inserts self-healing execution metrics
+func (s *SQLiteStore) InsertSelfHealingExecutionMetrics(metrics types.SelfHealingExecutionMetric) error {
+	now := time.Now().UTC()
+	const insertQuery = "INSERT INTO self_healing_execution_metrics(id, trigger_id, challenge_id, message_type, data, sender_id, sender_signature, created_at, updated_at) VALUES(NULL,?,?,?,?,?,?,?,?) ON CONFLICT DO NOTHING;"
+
+	_, err := s.db.Exec(insertQuery, metrics.TriggerID, metrics.ChallengeID, metrics.MessageType, metrics.Data, metrics.SenderID, metrics.SenderSignature, now, now)
+	if err != nil {
+		return err
+	}
+
+	return nil
+}
+
+// BatchInsertExecutionMetrics inserts execution metrics in a batch
+func (s *SQLiteStore) BatchInsertExecutionMetrics(metrics []types.SelfHealingExecutionMetric) error {
+	tx, err := s.db.Begin()
+	if err != nil {
+		return err
+	}
+
+	stmt, err := tx.Prepare(`
+	INSERT OR IGNORE INTO self_healing_execution_metrics
+	(id, trigger_id, challenge_id, message_type, data, sender_id, sender_signature, created_at, updated_at)
+	VALUES (NULL,?,?,?,?,?,?,?,?)
+	`)
+	if err != nil {
+		tx.Rollback()
+		return err
+	}
+	defer stmt.Close()
+
+	for _, metric := range metrics {
+		now := time.Now().UTC()
+
+		_, err = stmt.Exec(metric.TriggerID, metric.ChallengeID, metric.MessageType, metric.Data, metric.SenderID, metric.SenderSignature, now, now)
+		if err != nil {
+			tx.Rollback()
+			return err
+		}
+	}
+
+	// Commit the transaction
+	return tx.Commit()
+}
+
+// GetSelfHealingExecutionMetrics retrieves all self_healing_execution_metrics records created after the specified timestamp.
+func (s *SQLiteStore) GetSelfHealingExecutionMetrics(timestamp time.Time) ([]types.SelfHealingExecutionMetric, error) {
+	const query = `
+	SELECT id, trigger_id, challenge_id, message_type, data, sender_id, sender_signature, created_at, updated_at
+	FROM self_healing_execution_metrics
+	WHERE created_at > ?
+	`
+
+	rows, err := s.db.Query(query, timestamp)
+	if err != nil {
+		return nil, err
+	}
+	defer rows.Close()
+
+	var metrics []types.SelfHealingExecutionMetric
+	for rows.Next() {
+		var m types.SelfHealingExecutionMetric
+		if err := rows.Scan(&m.ID, &m.TriggerID, &m.ChallengeID, &m.MessageType, &m.Data, &m.SenderID, &m.SenderSignature, &m.CreatedAt, &m.UpdatedAt); err != nil {
+			return nil, err
+		}
+		metrics = append(metrics, m)
+	}
+
+	return metrics, rows.Err()
+}
+
+// GetSelfHealingGenerationMetrics retrieves all self_healing_generation_metrics records created after the specified timestamp.
+func (s *SQLiteStore) GetSelfHealingGenerationMetrics(timestamp time.Time) ([]types.SelfHealingGenerationMetric, error) {
+	const query = `
+	SELECT id, trigger_id, message_type, data, sender_id, sender_signature, created_at, updated_at
+	FROM self_healing_generation_metrics
+	WHERE created_at > ?
+	`
+
+	rows, err := s.db.Query(query, timestamp)
+	if err != nil {
+		return nil, err
+	}
+	defer rows.Close()
+
+	var metrics []types.SelfHealingGenerationMetric
+	for rows.Next() {
+		var m types.SelfHealingGenerationMetric
+		if err := rows.Scan(&m.ID, &m.TriggerID, &m.MessageType, &m.Data, &m.SenderID, &m.SenderSignature, &m.CreatedAt, &m.UpdatedAt); err != nil {
+			return nil, err
+		}
+		metrics = append(metrics, m)
+	}
+
+	return metrics, rows.Err()
+}
+
+// GetLastNSCMetrics returns up to the 20 most recent storage-challenge IDs that have more than 5 messages each
+func (s *SQLiteStore) GetLastNSCMetrics() ([]types.NScMetric, error) {
+	const query = `
+SELECT
+    count(*) AS count,
+    challenge_id,
+    MAX(created_at) AS most_recent
+FROM
+    storage_challenge_metrics
+GROUP BY
+    challenge_id
+HAVING
+    count(*) > 5
+ORDER BY
+    most_recent DESC
+LIMIT 20;`
+
+	rows, err := s.db.Query(query)
+	if err != nil {
+		return nil, err
+	}
+	defer rows.Close()
+
+	var metrics []types.NScMetric
+	for rows.Next() {
+		var m types.NScMetric
+		err := rows.Scan(&m.Count, &m.ChallengeID, &m.CreatedAt)
+		if err != nil {
+			return nil, err
+		}
+		metrics = append(metrics, m)
+	}
+
+	return metrics, rows.Err()
+}
+
+// GetLastNHCMetrics returns up to the 20 most recent health-check challenge IDs that have more than 5 messages each
+func (s *SQLiteStore) GetLastNHCMetrics() ([]types.NHcMetric, error) {
+	const query = `
+SELECT
+    count(*) AS count,
+    challenge_id,
+    MAX(created_at) AS most_recent
+FROM
+    healthcheck_challenge_metrics
+GROUP BY
+    challenge_id
+HAVING
+    count(*) > 5
+ORDER BY
+    most_recent DESC
+LIMIT 20;`
+
+	rows, err := s.db.Query(query)
+	if err != nil {
+		return nil, err
+	}
+	defer rows.Close()
+
+	var metrics []types.NHcMetric
+	for rows.Next() {
+		var m types.NHcMetric
+		err := rows.Scan(&m.Count, &m.ChallengeID, &m.CreatedAt)
+		if err != nil {
+			return nil, err
+		}
+		metrics = append(metrics, m)
+	}
+
+	return metrics, rows.Err()
+}
+
+// GetSHExecutionMetrics retrieves self-healing execution metrics
+func (s *SQLiteStore) GetSHExecutionMetrics(ctx context.Context, from time.Time) (metrics.SHExecutionMetrics, error) {
+	m := metrics.SHExecutionMetrics{}
+	rows, err := s.GetSelfHealingExecutionMetrics(from)
+	if err != nil {
+		return m, err
+	}
+	log.WithContext(ctx).WithField("rows", len(rows)).Info("self-healing execution metrics row count")
+
+	challenges := make(map[string]SHChallengeMetric)
+	for _, row := range rows {
+		if _, ok := challenges[row.ChallengeID]; !ok {
+			challenges[row.ChallengeID] = SHChallengeMetric{
+				ChallengeID: row.ChallengeID,
+			}
+		}
+
+		if row.MessageType == int(types.SelfHealingVerificationMessage) {
+			messages := types.SelfHealingMessages{}
+			if err := json.Unmarshal(row.Data, &messages); err != nil {
+				return m, fmt.Errorf("cannot unmarshal self-healing verification message: %w - row ID: %d", err, row.ID)
+			}
+
+			if len(messages) >= minVerifications {
+				ch := challenges[row.ChallengeID]
+				ch.HasMinVerifications = true
+				challenges[row.ChallengeID] = ch
+			}
+
+			reconReqVerified := 0
+			reconNotReqVerified := 0
+			reconReqUnverified := 0
+			reconNotReqUnverified := 0
+			reconReqHashMismatch := 0
+
+			for _, message := range messages {
+				if message.SelfHealingMessageData.Verification.VerifiedTicket.IsReconstructionRequired {
+					if message.SelfHealingMessageData.Verification.VerifiedTicket.IsReconstructionRequiredByHealer {
+						if message.SelfHealingMessageData.Verification.VerifiedTicket.IsVerified {
+							reconReqVerified++
+						} else {
+							reconReqHashMismatch++
+						}
+					} else {
+						reconNotReqUnverified++
+					}
+				} else {
+					if message.SelfHealingMessageData.Verification.VerifiedTicket.IsReconstructionRequiredByHealer {
+						reconReqUnverified++
+					} else {
+						reconNotReqVerified++
+					}
+				}
+			}
+
+			if reconReqVerified >= minVerifications {
+				ch := challenges[row.ChallengeID]
+				ch.IsVerified = true
+				ch.IsReconstructionRequiredVerified = true
+				challenges[row.ChallengeID] = ch
+			} else if reconNotReqVerified >= minVerifications {
+				ch := challenges[row.ChallengeID]
+				ch.IsVerified = true
+				ch.IsReconstructionNotRequiredVerified = true
+				challenges[row.ChallengeID] = ch
+			} else if reconReqUnverified >= minVerifications {
+				ch := challenges[row.ChallengeID]
+				ch.IsUnverified = true
+				ch.IsReconstructionRequiredNotVerified = true
+				challenges[row.ChallengeID] = ch
+			} else if reconNotReqUnverified >= minVerifications {
+				ch := challenges[row.ChallengeID]
+				ch.IsUnverified = true
+				ch.IsReconstructionNotRequiredNotVerified = true
+				challenges[row.ChallengeID] = ch
+			} else if reconReqHashMismatch >= minVerifications {
+				ch := challenges[row.ChallengeID]
+				ch.IsReconstructionRequiredHashMismatch = true
+				challenges[row.ChallengeID] = ch
+			}
+
+		} else if row.MessageType == int(types.SelfHealingResponseMessage) {
+			messages := types.SelfHealingMessages{}
+			if err := json.Unmarshal(row.Data, &messages); err != nil {
+				return m, fmt.Errorf("cannot unmarshal self-healing response message: %w - row ID: %d", err, row.ID)
+			}
+			if len(messages) == 0 {
+				return m, fmt.Errorf("self-healing response should contain at least one message - problem with row ID %d", row.ID)
+			}
+
+			data := messages[0].SelfHealingMessageData
+
+			ch := challenges[row.ChallengeID]
+			if data.Response.RespondedTicket.IsReconstructionRequired {
+				ch.IsAccepted = true
+			} else {
+				ch.IsRejected = true
+			}
+			challenges[row.ChallengeID] = ch
+
+		} else if row.MessageType == int(types.SelfHealingCompletionMessage) {
+			ch := challenges[row.ChallengeID]
+			ch.IsHealed = true
+			challenges[row.ChallengeID] = ch
+		} else if row.MessageType == int(types.SelfHealingAcknowledgementMessage) {
+			ch := challenges[row.ChallengeID]
+			ch.IsAck = true
+			challenges[row.ChallengeID] = ch
+		}
+	}
+
+	log.WithContext(ctx).WithField("challenges", len(challenges)).Info("self-healing execution metrics challenges count")
+
+	for _, challenge := range challenges {
+		log.WithContext(ctx).WithField("challenge-id", challenge.ChallengeID).WithField("is-accepted", challenge.IsAccepted).
+			WithField("is-verified", challenge.IsVerified).WithField("is-healed", challenge.IsHealed).
+			Info("self-healing challenge metric")
+
+		if challenge.IsAck {
+			m.TotalChallengesAcknowledged++
+		}
+
+		if challenge.IsAccepted {
+			m.TotalChallengesAccepted++
+		}
+
+		if challenge.IsRejected {
+			m.TotalChallengesRejected++
+		}
+
+		if challenge.IsVerified {
+			m.TotalChallengeEvaluationsVerified++
+		}
+
+		if challenge.IsReconstructionRequiredVerified {
+			m.TotalReconstructionsApproved++
+		}
+
+		if challenge.IsReconstructionNotRequiredVerified {
+			m.TotalReconstructionsNotRquiredApproved++
+		}
+
+		if challenge.IsUnverified {
+			m.TotalChallengeEvaluationsUnverified++
+		}
+
+		if challenge.IsReconstructionRequiredNotVerified {
+			m.TotalReconstructionsNotApproved++
+		}
+
+		if challenge.IsReconstructionNotRequiredNotVerified {
+			m.TotalReconstructionsNotRequiredEvaluationNotApproved++
+		}
+
+		if challenge.IsReconstructionRequiredHashMismatch {
+			m.TotalReconstructionRequiredHashMismatch++
+		}
+
+		if challenge.IsHealed {
+			m.TotalFilesHealed++
+		}
+	}
+
+	return m, nil
+}
+
+// QueryMetrics queries the self-healing metrics
+func (s *SQLiteStore) QueryMetrics(ctx context.Context, from time.Time, _ *time.Time) (m metrics.Metrics, err error) {
+	genMetric, err := s.GetSelfHealingGenerationMetrics(from)
+	if err != nil {
+		return metrics.Metrics{}, err
+	}
+
+	te := metrics.SHTriggerMetrics{}
+	challengesIssued := 0
+	for _, metric := range genMetric {
+		t := metrics.SHTriggerMetric{}
+		data := types.SelfHealingMessages{}
+		if err := json.Unmarshal(metric.Data, &data); err != nil {
+			return metrics.Metrics{}, fmt.Errorf("cannot unmarshal self-healing generation message: %w", err)
+		}
+
+		if len(data) < 1 {
+			return metrics.Metrics{}, fmt.Errorf("self-healing generation message should contain at least one entry")
+		}
+
+		t.TriggerID = metric.TriggerID
+		t.ListOfNodes = data[0].SelfHealingMessageData.Challenge.NodesOnWatchlist
+		t.TotalTicketsIdentified = len(data[0].SelfHealingMessageData.Challenge.ChallengeTickets)
+
+		for _, ticket := range data[0].SelfHealingMessageData.Challenge.ChallengeTickets {
+			t.TotalFilesIdentified += len(ticket.MissingKeys)
+		}
+
+		challengesIssued += t.TotalTicketsIdentified
+
+		te = append(te, t)
+	}
+
+	em, err := s.GetSHExecutionMetrics(ctx, from)
+	if err != nil {
+		return metrics.Metrics{}, fmt.Errorf("cannot get self healing execution metrics: %w", err)
+	}
+
+	em.TotalChallengesIssued = challengesIssued
+	em.TotalFileHealingFailed = em.TotalReconstructionsApproved - em.TotalFilesHealed
+
+	m.SHTriggerMetrics = te
+	m.SHExecutionMetrics = em
+
+	return m, nil
+}
+
+// GetLastNSHChallenges retrieves the latest 'N' self-healing challenges
+func (s *SQLiteStore) GetLastNSHChallenges(ctx context.Context, n int) (types.SelfHealingReports, error) {
+	challenges := types.SelfHealingReports{}
+	rows, err := s.GetSelfHealingExecutionMetrics(oneYearAgo)
+	if err != nil {
+		return challenges, err
+	}
+	log.WithContext(ctx).WithField("rows", len(rows)).Info("self-healing execution metrics row count")
+
+	challengesInserted := 0
+	for _, row := range rows {
+		if _, ok := challenges[row.ChallengeID]; !ok {
+			if challengesInserted == n {
+				continue
+			}
+
+			challenges[row.ChallengeID] = types.SelfHealingReport{}
+			challengesInserted++
+		}
+
+		messages := types.SelfHealingMessages{}
+		if err := json.Unmarshal(row.Data, &messages); err != nil {
+			return challenges, fmt.Errorf("cannot unmarshal self-healing execution message: %w", err)
+		}
+
+		msgType := types.SelfHealingMessageType(row.MessageType)
+		challenges[row.ChallengeID][msgType.String()] = messages
+	}
+
+	return challenges, nil
+}
+
+// GetSHChallengeReport returns the self-healing report for the given challenge ID
+func (s *SQLiteStore) GetSHChallengeReport(ctx context.Context, challengeID string) (types.SelfHealingReports, error) {
+	challenges := types.SelfHealingReports{}
+	rows, err := s.GetSelfHealingExecutionMetrics(oneYearAgo)
+	if err != nil {
+		return challenges, err
+	}
+	log.WithContext(ctx).WithField("rows", len(rows)).Info("self-healing execution metrics row count")
+
+	for _, row := range rows {
+		if row.ChallengeID == challengeID {
+			if _, ok := challenges[row.ChallengeID]; !ok {
+				challenges[row.ChallengeID] = types.SelfHealingReport{}
+			}
+
+			messages := types.SelfHealingMessages{}
+			if err := json.Unmarshal(row.Data, &messages); err != nil {
+				return challenges, fmt.Errorf("cannot unmarshal self-healing execution message: %w", err)
+			}
+
+			msgType := types.SelfHealingMessageType(row.MessageType)
+			challenges[row.ChallengeID][msgType.String()] = messages
+		}
+	}
+
+	return challenges, nil
+}
+
+// QuerySelfHealingChallenges retrieves the self-healing audit logs stored in the DB
+func (s *SQLiteStore) QuerySelfHealingChallenges() (challenges []types.SelfHealingChallenge, err error) {
+	const selectQuery = "SELECT * FROM self_healing_challenges"
+	rows, err := s.db.Query(selectQuery)
+	if err != nil {
+		return nil, err
+	}
+	defer rows.Close()
+
+	for rows.Next() {
+		challenge := types.SelfHealingChallenge{}
+		err = rows.Scan(&challenge.ID, &challenge.ChallengeID, &challenge.MerkleRoot, &challenge.FileHash,
+			&challenge.ChallengingNode, &challenge.RespondingNode, &challenge.VerifyingNode, &challenge.ReconstructedFileHash,
+			&challenge.Status, &challenge.CreatedAt, &challenge.UpdatedAt)
+		if err != nil {
+			return nil, err
+		}
+
+		challenges = append(challenges, challenge)
+	}
+
+	return challenges, nil
+}
+
+// BatchInsertSelfHealingChallengeEvents inserts self-healing-challenge events in a batch
+func (s *SQLiteStore) BatchInsertSelfHealingChallengeEvents(ctx context.Context, eventsBatch []types.SelfHealingChallengeEvent) error {
+	tx, err := s.db.BeginTx(ctx, nil)
+	if err != nil {
+		return err
+	}
+
+	stmt, err := tx.Prepare(`
+	INSERT OR IGNORE INTO self_healing_challenge_events
+	(trigger_id, ticket_id, challenge_id, data, sender_id, is_processed, created_at, updated_at)
+	VALUES (?, ?, ?, ?, ?, ?, ?, ?)
+	`)
+	if err != nil {
+		tx.Rollback()
+		return err
+	}
+	defer stmt.Close()
+
+	stmt2, err := tx.Prepare(`
+	INSERT OR IGNORE INTO self_healing_execution_metrics(id, trigger_id, challenge_id, message_type, data, sender_id, sender_signature, created_at, updated_at)
+	VALUES(NULL,?,?,?,?,?,?,?,?);
+	`)
+	if err != nil {
+		tx.Rollback()
+		return err
+	}
+	defer stmt2.Close()
+
+	for _, event := range eventsBatch {
+		now := time.Now().UTC()
+
+		_, err = stmt.Exec(event.TriggerID, event.TicketID, event.ChallengeID, event.Data, event.SenderID, false, now, now)
+		if err != nil {
+			tx.Rollback()
+			return err
+		}
+
+		_, err = stmt2.Exec(event.ExecMetric.TriggerID, event.ExecMetric.ChallengeID, event.ExecMetric.MessageType, event.ExecMetric.Data, event.ExecMetric.SenderID, event.ExecMetric.SenderSignature, now, now)
+		if err != nil {
+			tx.Rollback()
+			return err
+		}
+	}
+
+	return tx.Commit()
+}
+
+// GetSelfHealingChallengeEvents retrieves the not-yet-processed challenge events from the DB
+func (s *SQLiteStore) GetSelfHealingChallengeEvents() ([]types.SelfHealingChallengeEvent, error) {
+	const selectQuery = `
+	SELECT trigger_id, ticket_id, challenge_id, data, sender_id, is_processed, created_at, updated_at
+	FROM self_healing_challenge_events
+	WHERE is_processed = false
+	`
+	rows, err := s.db.Query(selectQuery)
+	if err != nil {
+		return nil, err
+	}
+	defer rows.Close()
+
+	var events []types.SelfHealingChallengeEvent
+	for rows.Next() {
+		var event types.SelfHealingChallengeEvent
+		if err := rows.Scan(
+			&event.TriggerID, &event.TicketID, &event.ChallengeID, &event.Data, &event.SenderID, &event.IsProcessed,
+			&event.CreatedAt, &event.UpdatedAt,
+		); err != nil {
+			return nil, err
+		}
+
+		events = append(events, event)
+	}
+
+	return events, nil
+}
+
+// UpdateSHChallengeEventProcessed updates the is_processed flag of an event
+func (s *SQLiteStore) UpdateSHChallengeEventProcessed(challengeID string, isProcessed bool) error {
+	const updateQuery = `
+	UPDATE self_healing_challenge_events
+	SET is_processed = ?
+	WHERE challenge_id = ?
+	`
+	_, err := s.db.Exec(updateQuery, isProcessed, challengeID)
+	return err
+}
+
+// CleanupSelfHealingChallenges cleans up self-healing challenges stored in DB for inspection
+func (s *SQLiteStore) CleanupSelfHealingChallenges() (err error) {
+	const delQuery = "DELETE FROM self_healing_challenges"
+	_, err = s.db.Exec(delQuery)
+	return err
+}
diff --git a/pkg/storage/queries/sqlite.go b/pkg/storage/queries/sqlite.go
new file mode 100644
index 00000000..c1d4cb02
--- /dev/null
+++ b/pkg/storage/queries/sqlite.go
@@ -0,0 +1,413 @@
+package queries
+
+import (
+	"context"
+	"fmt"
+	"path/filepath"
+
+	"github.com/LumeraProtocol/supernode/pkg/configurer"
+	"github.com/LumeraProtocol/supernode/pkg/log"
+	"github.com/jmoiron/sqlx"
+	_ "github.com/mattn/go-sqlite3" //go-sqlite3
+)
+
+var (
+	DefaulthPath = configurer.DefaultPath()
+)
+
+const minVerifications = 3
+
+const createTaskHistory string = `
+    CREATE TABLE IF NOT EXISTS task_history (
+        id INTEGER NOT NULL PRIMARY KEY AUTOINCREMENT,
+        time DATETIME NOT NULL,
+        task_id TEXT NOT NULL,
+        status TEXT NOT NULL
+    );`
+
+const alterTaskHistory string = `ALTER TABLE task_history ADD COLUMN details TEXT;`
+
+const createStorageChallengeMessages string = `
+    CREATE TABLE IF NOT EXISTS storage_challenge_messages (
+        id INTEGER NOT NULL PRIMARY KEY AUTOINCREMENT,
+        challenge_id TEXT NOT NULL,
+        message_type INTEGER NOT NULL,
+        data BLOB NOT NULL,
+        sender_id TEXT NOT NULL,
+        sender_signature BLOB NOT NULL,
+        created_at DATETIME NOT NULL,
+        updated_at DATETIME NOT NULL
+    );`
+
+const createBroadcastChallengeMessages string = `
+    CREATE TABLE IF NOT EXISTS broadcast_challenge_messages (
+        id INTEGER NOT NULL PRIMARY KEY AUTOINCREMENT,
+        challenge_id TEXT NOT NULL,
+        challenger TEXT NOT NULL,
+        recipient TEXT NOT NULL,
+        observers TEXT NOT NULL,
+        data BLOB NOT NULL,
+        created_at DATETIME NOT NULL,
+        updated_at DATETIME NOT NULL
+    );`
+
+const createStorageChallengeMessagesUniqueIndex string = `
+CREATE UNIQUE INDEX IF NOT EXISTS storage_challenge_messages_unique ON storage_challenge_messages(challenge_id, message_type, sender_id);
+`
+
+const createSelfHealingChallenges string = `
+    CREATE TABLE IF NOT EXISTS self_healing_challenges (
+        id INTEGER NOT NULL PRIMARY KEY AUTOINCREMENT,
+        challenge_id TEXT NOT NULL,
+        merkleroot TEXT NOT NULL,
+        file_hash TEXT NOT NULL,
+        challenging_node TEXT NOT NULL,
+        responding_node TEXT NOT NULL,
+        verifying_node TEXT,
+        reconstructed_file_hash BLOB,
+        status TEXT NOT NULL,
+        created_at DATETIME NOT NULL,
+        updated_at DATETIME NOT NULL
+    );`
+
+const createPingHistory string = `
+    CREATE TABLE IF NOT EXISTS ping_history (
+        id INTEGER NOT NULL PRIMARY KEY AUTOINCREMENT,
+        supernode_id TEXT UNIQUE NOT NULL,
+        ip_address TEXT UNIQUE NOT NULL,
+        total_pings INTEGER NOT NULL,
+        total_successful_pings INTEGER NOT NULL,
+        avg_ping_response_time FLOAT NOT NULL,
+        is_online BOOLEAN NOT NULL,
+        is_on_watchlist BOOLEAN NOT NULL,
+        is_adjusted BOOLEAN NOT NULL,
+        cumulative_response_time FLOAT NOT NULL,
+        last_seen DATETIME NOT NULL,
+        created_at DATETIME NOT NULL,
+        updated_at DATETIME NOT NULL
+    );`
+
+const createPingHistoryUniqueIndex string = `
+CREATE UNIQUE INDEX IF NOT EXISTS ping_history_unique ON ping_history(supernode_id, ip_address);
+`
+
+const createSelfHealingGenerationMetrics string = `
+    CREATE TABLE IF NOT EXISTS self_healing_generation_metrics (
+        id INTEGER NOT NULL PRIMARY KEY AUTOINCREMENT,
+        trigger_id TEXT NOT NULL,
+        message_type INTEGER NOT NULL,
+        data BLOB NOT NULL,
+        sender_id TEXT NOT NULL,
+        sender_signature BLOB NOT NULL,
+        created_at DATETIME NOT NULL,
+        updated_at DATETIME NOT NULL
+    );`
+
+const createSelfHealingGenerationMetricsUniqueIndex string = `
+CREATE UNIQUE INDEX IF NOT EXISTS self_healing_generation_metrics_unique ON self_healing_generation_metrics(trigger_id);
+`
+
+const createSelfHealingExecutionMetrics string = `
+    CREATE TABLE IF NOT EXISTS self_healing_execution_metrics (
+        id INTEGER NOT NULL PRIMARY KEY AUTOINCREMENT,
+        trigger_id TEXT NOT NULL,
+        challenge_id TEXT NOT NULL,
+        message_type INTEGER NOT NULL,
+        data BLOB NOT NULL,
+        sender_id TEXT NOT NULL,
+        sender_signature BLOB NOT NULL,
+        created_at DATETIME NOT NULL,
+        updated_at DATETIME NOT NULL
+    );`
+
+const createSelfHealingChallengeTickets string = `
+    CREATE TABLE IF NOT EXISTS self_healing_challenge_events (
+        id INTEGER NOT NULL PRIMARY KEY AUTOINCREMENT,
+        trigger_id TEXT NOT NULL,
+        ticket_id TEXT NOT NULL,
+        challenge_id TEXT NOT NULL,
+        data BLOB NOT NULL,
+        sender_id TEXT NOT NULL,
+        is_processed BOOLEAN NOT NULL,
+        created_at DATETIME NOT NULL,
+        updated_at DATETIME NOT NULL
+    );
+`
+
+const createSelfHealingChallengeTicketsUniqueIndex string = `
+CREATE UNIQUE INDEX IF NOT EXISTS self_healing_challenge_events_unique ON self_healing_challenge_events(trigger_id, ticket_id, challenge_id);
+`
+
+const createSelfHealingExecutionMetricsUniqueIndex string = `
+CREATE UNIQUE INDEX IF NOT EXISTS self_healing_execution_metrics_unique ON self_healing_execution_metrics(trigger_id, challenge_id, message_type);
+`
+
+const alterTablePingHistory = `ALTER TABLE ping_history
+ADD COLUMN metrics_last_broadcast_at DATETIME NULL;`
+
+const alterTablePingHistoryGenerationMetrics = `ALTER TABLE ping_history
+ADD COLUMN generation_metrics_last_broadcast_at DATETIME NULL;`
+
+const alterTablePingHistoryExecutionMetrics = `ALTER TABLE ping_history
+ADD COLUMN execution_metrics_last_broadcast_at DATETIME NULL;`
+
+const createStorageChallengeMetrics string = `
+    CREATE TABLE IF NOT EXISTS storage_challenge_metrics (
+        id INTEGER NOT NULL PRIMARY KEY AUTOINCREMENT,
+        challenge_id TEXT NOT NULL,
+        message_type INTEGER NOT NULL,
+        data BLOB NOT NULL,
+        sender_id TEXT NOT NULL,
+        created_at DATETIME NOT NULL,
+        updated_at DATETIME NOT NULL
+    );`
+
+const createStorageChallengeMetricsUniqueIndex string = `
+CREATE UNIQUE INDEX IF NOT EXISTS storage_challenge_metrics_unique ON storage_challenge_metrics(challenge_id, message_type, sender_id);
+`
+
+const createHealthCheckChallengeMessages string = `
+    CREATE TABLE IF NOT EXISTS healthcheck_challenge_messages (
+        id INTEGER NOT NULL PRIMARY KEY AUTOINCREMENT,
+        challenge_id TEXT NOT NULL,
+        message_type INTEGER NOT NULL,
+        data BLOB NOT NULL,
+        sender_id TEXT NOT NULL,
+        sender_signature BLOB NOT NULL,
+        created_at DATETIME NOT NULL,
+        updated_at DATETIME NOT NULL
+    );`
+
+const createBroadcastHealthCheckChallengeMessages string = `
+    CREATE TABLE IF NOT EXISTS broadcast_healthcheck_challenge_messages (
+        id INTEGER NOT NULL PRIMARY KEY AUTOINCREMENT,
+        challenge_id TEXT NOT NULL,
+        challenger TEXT NOT NULL,
+        recipient TEXT NOT NULL,
+        observers TEXT NOT NULL,
+        data BLOB NOT NULL,
+        created_at DATETIME NOT NULL,
+        updated_at DATETIME NOT NULL
+    );`
+
+const createHealthCheckChallengeMetrics string = `
+    CREATE TABLE IF NOT EXISTS healthcheck_challenge_metrics (
+        id INTEGER NOT NULL PRIMARY KEY AUTOINCREMENT,
+        challenge_id TEXT NOT NULL,
+        message_type INTEGER NOT NULL,
+        data BLOB NOT NULL,
+        sender_id TEXT NOT NULL,
+        created_at DATETIME NOT NULL,
+        updated_at DATETIME NOT NULL
+);
+`
+const createHealthCheckChallengeMetricsUniqueIndex string = `
+CREATE UNIQUE INDEX IF NOT EXISTS healthcheck_challenge_metrics_unique ON healthcheck_challenge_metrics(challenge_id, message_type, sender_id);
+`
+const alterTablePingHistoryHealthCheckColumn = `ALTER TABLE ping_history
+ADD COLUMN health_check_metrics_last_broadcast_at DATETIME NULL;`
+
+const createPingHistoryWithoutUniqueIPAddress string = `
+BEGIN TRANSACTION;
+
+CREATE TABLE IF NOT EXISTS new_ping_history (
+    id INTEGER NOT NULL PRIMARY KEY AUTOINCREMENT,
+    supernode_id TEXT UNIQUE NOT NULL,
+    ip_address TEXT NOT NULL, -- Removed UNIQUE constraint here
+    total_pings INTEGER NOT NULL,
+    total_successful_pings INTEGER NOT NULL,
+    avg_ping_response_time FLOAT NOT NULL,
+    is_online BOOLEAN NOT NULL,
+    is_on_watchlist BOOLEAN NOT NULL,
+    is_adjusted BOOLEAN NOT NULL,
+    cumulative_response_time FLOAT NOT NULL,
+    last_seen DATETIME NOT NULL,
+    created_at DATETIME NOT NULL,
+    updated_at DATETIME NOT NULL,
+    metrics_last_broadcast_at DATETIME, -- Assuming these columns already exist in the old table
+    generation_metrics_last_broadcast_at DATETIME,
+    execution_metrics_last_broadcast_at DATETIME,
+    health_check_metrics_last_broadcast_at DATETIME
+);
+
+-- Step 2: Copy data including all columns from the old table
+INSERT INTO new_ping_history (
+    id,
+    supernode_id,
+    ip_address,
+    total_pings,
+    total_successful_pings,
+    avg_ping_response_time,
+    is_online,
+    is_on_watchlist,
+    is_adjusted,
+    cumulative_response_time,
+    last_seen,
+    created_at,
+    updated_at,
+    metrics_last_broadcast_at,
+    generation_metrics_last_broadcast_at,
+    execution_metrics_last_broadcast_at,
+    health_check_metrics_last_broadcast_at
+)
+SELECT
+    id,
+    supernode_id,
+    ip_address,
+    total_pings,
+    total_successful_pings,
+    avg_ping_response_time,
+    is_online,
+    is_on_watchlist,
+    is_adjusted,
+    cumulative_response_time,
+    last_seen,
+    created_at,
+    updated_at,
+    metrics_last_broadcast_at,
+    generation_metrics_last_broadcast_at,
+    execution_metrics_last_broadcast_at,
+    health_check_metrics_last_broadcast_at
+FROM ping_history;
+
+-- Step 3: Drop the original table
+DROP TABLE ping_history;
+
+-- Step 4: Rename the new table to the original table's name
+ALTER TABLE new_ping_history RENAME TO ping_history;
+
+COMMIT;
+`
+
+const (
+	historyDBName = "history.db"
+	emptyString   = ""
+)
+
+// SQLiteStore handles sqlite ops
+type SQLiteStore struct {
+	db *sqlx.DB
+}
+
+// CloseHistoryDB closes history database
+func (s *SQLiteStore) CloseHistoryDB(ctx context.Context) {
+	if err := s.db.Close(); err != nil {
+		log.WithContext(ctx).WithError(err).Error("error closing history db")
+	}
+}
+
+// OpenHistoryDB opens history DB
+func OpenHistoryDB() (LocalStoreInterface, error) {
+	dbFile := filepath.Join(DefaulthPath, historyDBName)
+	db, err := sqlx.Connect("sqlite3", dbFile)
+	if err != nil {
+		return nil, fmt.Errorf("cannot open sqlite database: %w", err)
+	}
+
+	if _, err := db.Exec(createTaskHistory); err != nil {
+		return nil, fmt.Errorf("cannot create table(s): %w", err)
+	}
+
+	if _, err := db.Exec(createStorageChallengeMessages); err != nil {
+		return nil, fmt.Errorf("cannot create table(s): %w", err)
+	}
+
+	if _, err := db.Exec(createStorageChallengeMessagesUniqueIndex); err != nil {
+		return nil, fmt.Errorf("cannot create index(es): %w", err)
+	}
+
+	if _, err := db.Exec(createBroadcastChallengeMessages); err != nil {
+		return nil, fmt.Errorf("cannot create table(s): %w", err)
+	}
+
+	if _, err := db.Exec(createSelfHealingChallenges); err != nil {
+		return nil, fmt.Errorf("cannot create table(s): %w", err)
+	}
+
+	if _, err := db.Exec(createPingHistory); err != nil {
+		return nil, fmt.Errorf("cannot create table(s): %w", err)
+	}
+
+	if _, err := db.Exec(createPingHistoryUniqueIndex); err != nil {
+		return nil, fmt.Errorf("cannot create index(es): %w", err)
+	}
+
+	if _, err := db.Exec(createSelfHealingGenerationMetrics); err != nil {
+		return nil, fmt.Errorf("cannot create table(s): %w", err)
+	}
+
+	if _, err := db.Exec(createSelfHealingGenerationMetricsUniqueIndex); err != nil {
+		return nil, fmt.Errorf("cannot create index(es): %w", err)
+	}
+
+	if _, err := db.Exec(createSelfHealingExecutionMetrics); err != nil {
+		return nil, fmt.Errorf("cannot create table(s): %w", err)
+	}
+
+	if _, err := db.Exec(createSelfHealingExecutionMetricsUniqueIndex); err != nil {
+		return nil, fmt.Errorf("cannot create index(es): %w", err)
+	}
+
+	if _, err := db.Exec(createSelfHealingChallengeTickets); err != nil {
+		return nil, fmt.Errorf("cannot create createSelfHealingChallengeTickets: %w", err)
+	}
+
+	if _, err := db.Exec(createSelfHealingChallengeTicketsUniqueIndex); err != nil {
+		return nil, fmt.Errorf("cannot create createSelfHealingChallengeTicketsUniqueIndex: %w", err)
+	}
+
+	if _, err := db.Exec(createStorageChallengeMetrics); err != nil {
+		return nil, fmt.Errorf("cannot create table(s): %w", err)
+	}
+
+	if _, err := db.Exec(createStorageChallengeMetricsUniqueIndex); err != nil {
+		return nil, fmt.Errorf("cannot create index(es): %w", err)
+	}
+
+	if _, err := db.Exec(createHealthCheckChallengeMessages); err != nil {
+		return nil, fmt.Errorf("cannot create table(s): %w", err)
+	}
+
+	if _, err := db.Exec(createHealthCheckChallengeMetrics); err != nil {
+		return nil, fmt.Errorf("cannot create table(s): %w", err)
+	}
+
+	if _, err := db.Exec(createHealthCheckChallengeMetricsUniqueIndex); err != nil {
+		return nil, fmt.Errorf("cannot create index(es): %w", err)
+	}
+
+	if _, err := db.Exec(createBroadcastHealthCheckChallengeMessages); err != nil {
+		return nil, fmt.Errorf("cannot create table(s): %w", err)
+	}
+
+	// these ALTERs add columns that may already exist, so their errors are deliberately ignored
+	_, _ = db.Exec(alterTaskHistory)
+	_, _ = db.Exec(alterTablePingHistory)
+	_, _ = db.Exec(alterTablePingHistoryGenerationMetrics)
+	_, _ = db.Exec(alterTablePingHistoryExecutionMetrics)
+	_, _ = db.Exec(alterTablePingHistoryHealthCheckColumn)
+
+	_, err = db.Exec(createPingHistoryWithoutUniqueIPAddress)
+	if err != nil {
+		log.WithError(err).Error("error executing ping-history w/o unique ip-address constraint migration")
+	}
+
+	pragmas := []string{
+		"PRAGMA synchronous=NORMAL;",
+		"PRAGMA cache_size=-262144;",
+		"PRAGMA busy_timeout=120000;",
+		"PRAGMA journal_mode=WAL;",
+	}
+
+	for _, pragma := range pragmas {
+		if _, err := db.Exec(pragma); err != nil {
+			return nil, fmt.Errorf("cannot set sqlite database parameter: %w", err)
+		}
+	}
+
+	return &SQLiteStore{
+		db: db,
+	}, nil
+}
diff --git a/pkg/storage/queries/storage_challenge.go b/pkg/storage/queries/storage_challenge.go
new file mode 100644
index 00000000..204fd0cc
--- /dev/null
+++ b/pkg/storage/queries/storage_challenge.go
@@ -0,0 +1,493 @@
+package queries
+
+import (
+	"context"
+	"fmt"
+	"time"
+
+	"github.com/LumeraProtocol/supernode/pkg/log"
+	"github.com/LumeraProtocol/supernode/pkg/types"
+	"github.com/LumeraProtocol/supernode/pkg/utils/metrics"
+
+	json "github.com/json-iterator/go"
+)
+
+const batchSizeForChallengeIDsRetrieval = 500
+
+// StorageChallengeQueries is the set of storage-challenge operations exposed by the store
+type StorageChallengeQueries interface {
+	InsertStorageChallengeMessage(challenge types.StorageChallengeLogMessage) error
+	InsertBroadcastMessage(challenge types.BroadcastLogMessage) error
+	QueryStorageChallengeMessage(challengeID string, messageType int) (challenge types.StorageChallengeLogMessage, err error)
+	CleanupStorageChallenges() (err error)
+	GetStorageChallengeMetricsByChallengeID(challengeID string) ([]types.StorageChallengeLogMessage, error)
+	GetMetricsByChallengeIDAndMessageType(challengeID string, messageType types.MessageType) ([]types.StorageChallengeLogMessage, error)
+
+	BatchInsertSCMetrics(metrics []types.StorageChallengeLogMessage) error
+	StorageChallengeMetrics(timestamp time.Time) ([]types.StorageChallengeLogMessage, error)
+	InsertStorageChallengeMetric(metric types.StorageChallengeMetric) error
+	GetSCSummaryStats(from time.Time) (scMetrics metrics.SCMetrics, err error)
+	GetTotalSCGeneratedAndProcessedAndEvaluated(from time.Time) (metrics.SCMetrics, error)
+	GetChallengerEvaluations(from time.Time) ([]types.StorageChallengeLogMessage, error)
+	GetObserversEvaluations(from time.Time) ([]types.StorageChallengeLogMessage, error)
+	GetMetricsDataByStorageChallengeID(ctx context.Context, challengeID string) ([]types.Message, error)
+	GetLastNSCMetrics() ([]types.NScMetric, error)
+	GetDistinctChallengeIDsCountForScoreAggregation(after, before time.Time) (int, error)
+	GetDistinctChallengeIDs(after, before time.Time, batchNumber int) ([]string, error)
+	BatchInsertScoreAggregationChallenges(challengeIDs []string, isAggregated bool) error
+}
+
+// InsertStorageChallengeMessage inserts a storage challenge message into the DB
+func (s *SQLiteStore) InsertStorageChallengeMessage(challenge types.StorageChallengeLogMessage) error {
+	now := time.Now().UTC()
+	const insertQuery = "INSERT INTO storage_challenge_messages(id, challenge_id, message_type, data, sender_id, sender_signature, created_at, updated_at) VALUES(NULL,?,?,?,?,?,?,?) ON CONFLICT DO NOTHING;"
+	_, err := s.db.Exec(insertQuery, challenge.ChallengeID, challenge.MessageType, challenge.Data, challenge.Sender, challenge.SenderSignature, now, now)
+	if err != nil {
+		return err
+	}
+
+	return nil
+}
+
+// InsertStorageChallengeMetric inserts a storage challenge metric into the DB
+func (s *SQLiteStore) InsertStorageChallengeMetric(m types.StorageChallengeMetric) error {
+	now := time.Now().UTC()
+
+	const metricsQuery = "INSERT INTO storage_challenge_metrics(id, challenge_id, message_type, data, sender_id, created_at, updated_at) VALUES(NULL,?,?,?,?,?,?) ON CONFLICT DO NOTHING;"
+	_, err := s.db.Exec(metricsQuery, m.ChallengeID, m.MessageType, m.Data, m.SenderID, now, now)
+	if err != nil {
+		return err
+	}
+
+	return nil
+}
+
+// BatchInsertSCMetrics inserts storage-challenge metrics in a batch
+func (s *SQLiteStore) BatchInsertSCMetrics(metrics []types.StorageChallengeLogMessage) error {
+	tx, err := s.db.Begin()
+	if err != nil {
+		return err
+	}
+
+	stmt, err := tx.Prepare(`
+	INSERT OR IGNORE INTO storage_challenge_metrics
+	(id, challenge_id, message_type, data, sender_id, created_at, updated_at)
+	VALUES (NULL,?,?,?,?,?,?)
+	`)
+	if err != nil {
+		tx.Rollback()
+		return err
+	}
+	defer stmt.Close()
+
+	for _, metric := range metrics {
+		now := time.Now().UTC()
+
+		_, err = stmt.Exec(metric.ChallengeID, metric.MessageType, metric.Data, metric.Sender, now, now)
+		if err != nil {
+			tx.Rollback()
+			return err
+		}
+	}
+
+	// Commit the transaction
+	return tx.Commit()
+}
+
+// GetMetricsDataByStorageChallengeID retrieves the metrics data for the given storage challenge ID
+func (s *SQLiteStore) GetMetricsDataByStorageChallengeID(ctx context.Context, challengeID string) (storageChallengeMessages []types.Message, err error) {
+	scMetrics, err := s.GetStorageChallengeMetricsByChallengeID(challengeID)
+	if err != nil {
+		return storageChallengeMessages, err
+	}
+	log.WithContext(ctx).WithField("rows", len(scMetrics)).Info("storage-challenge metrics row count")
+
+	for _, scMetric := range scMetrics {
+		msg := types.MessageData{}
+		if err := json.Unmarshal(scMetric.Data, &msg); err != nil {
+			return storageChallengeMessages, fmt.Errorf("cannot unmarshal storage challenge data: %w", err)
+		}
+
+		storageChallengeMessages = append(storageChallengeMessages, types.Message{
+			ChallengeID:     scMetric.ChallengeID,
+			MessageType:     types.MessageType(scMetric.MessageType),
+			Sender:          scMetric.Sender,
+			SenderSignature: scMetric.SenderSignature,
+			Data:            msg,
+		})
+	}
+
+	return storageChallengeMessages, nil
+}
+
+// GetTotalSCGeneratedAndProcessedAndEvaluated returns counts of generated, processed, and evaluated storage challenges since the given time
+func (s *SQLiteStore) GetTotalSCGeneratedAndProcessedAndEvaluated(from time.Time) (metrics.SCMetrics, error) {
+	// named m to avoid shadowing the imported metrics package
+	m := metrics.SCMetrics{}
+
+	// Query for total number of challenges
+	totalChallengeQuery := "SELECT COUNT(DISTINCT challenge_id) FROM storage_challenge_metrics WHERE message_type = 1 AND created_at > ?"
+	err := s.db.QueryRow(totalChallengeQuery, from).Scan(&m.TotalChallenges)
+	if err != nil {
+		return m, err
+	}
+
+	// Query for total challenges responded
+	totalChallengesProcessedQuery := "SELECT COUNT(DISTINCT challenge_id) FROM storage_challenge_metrics WHERE message_type = 2 AND created_at > ?"
+	err = s.db.QueryRow(totalChallengesProcessedQuery, from).Scan(&m.TotalChallengesProcessed)
+	if err != nil {
+		return m, err
+	}
+
+	// Query for total challenges evaluated by the challenger
+	totalChallengesEvaluatedQuery := "SELECT COUNT(DISTINCT challenge_id) FROM storage_challenge_metrics WHERE message_type = 3 AND created_at > ?"
+	err = s.db.QueryRow(totalChallengesEvaluatedQuery, from).Scan(&m.TotalChallengesEvaluatedByChallenger)
+	if err != nil {
+		return m, err
+	}
+
+	return m, nil
+}
+
+// GetChallengerEvaluations retrieves the challenger's evaluations since the given time
+func (s *SQLiteStore) GetChallengerEvaluations(from time.Time) ([]types.StorageChallengeLogMessage, error) {
+	var messages []types.StorageChallengeLogMessage
+
+	query := "SELECT id, challenge_id, message_type, data, sender_id, created_at, updated_at FROM storage_challenge_metrics WHERE message_type = 3 and created_at > ?"
+	rows, err := s.db.Query(query, from)
+	if err != nil {
+		return nil, err
+	}
+	defer rows.Close()
+
+	for rows.Next() {
+		var msg types.StorageChallengeLogMessage
+		err := rows.Scan(&msg.ID, &msg.ChallengeID, &msg.MessageType, &msg.Data, &msg.Sender, &msg.CreatedAt, &msg.UpdatedAt)
+		if err != nil {
+			return nil, err
+		}
+		messages = append(messages, msg)
+	}
+	if err := rows.Err(); err != nil {
+		return nil, err
+	}
+
+	return messages, nil
+}
+
+// GetObserversEvaluations retrieves the observers' evaluations since the given time
+func (s *SQLiteStore) GetObserversEvaluations(from time.Time) ([]types.StorageChallengeLogMessage, error) {
+	var messages []types.StorageChallengeLogMessage
+
+	query := "SELECT id, challenge_id, message_type, data, sender_id, created_at, updated_at FROM storage_challenge_metrics WHERE message_type = 4 and created_at > ?"
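+	// message_type = 4 corresponds to observer-evaluation messages, matching the
+	// other observer-evaluation and score-aggregation queries in this file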
+	rows, err := s.db.Query(query, from)
+	if err != nil {
+		return nil, err
+	}
+	defer rows.Close()
+
+	for rows.Next() {
+		var msg types.StorageChallengeLogMessage
+		err := rows.Scan(&msg.ID, &msg.ChallengeID, &msg.MessageType, &msg.Data, &msg.Sender, &msg.CreatedAt, &msg.UpdatedAt)
+		if err != nil {
+			return nil, err
+		}
+		messages = append(messages, msg)
+	}
+	if err := rows.Err(); err != nil {
+		return nil, err
+	}
+
+	return messages, nil
+}
+
+// GetSCSummaryStats gets storage-challenge summary stats
+func (s *SQLiteStore) GetSCSummaryStats(from time.Time) (scMetrics metrics.SCMetrics, err error) {
+	scMetrics, err = s.GetTotalSCGeneratedAndProcessedAndEvaluated(from)
+	if err != nil {
+		return scMetrics, err
+	}
+
+	observersEvaluations, err := s.GetObserversEvaluations(from)
+	if err != nil {
+		return scMetrics, err
+	}
+	log.WithField("observer_evaluations", len(observersEvaluations)).Info("observer evaluations retrieved")
+
+	observerEvaluationMetrics := processObserverEvaluations(observersEvaluations)
+	log.WithField("observer_evaluation_metrics", len(observerEvaluationMetrics)).Info("observer evaluation metrics retrieved")
+
+	for _, obMetrics := range observerEvaluationMetrics {
+		// a challenge counts as verified once at least minVerifications observers affirm it
+		if obMetrics.ChallengesVerified >= minVerifications {
+			scMetrics.TotalChallengesVerified++
+		} else {
+			if obMetrics.FailedByInvalidTimestamps > 0 {
+				scMetrics.SlowResponsesObservedByObservers++
+			}
+			if obMetrics.FailedByInvalidSignatures > 0 {
+				scMetrics.InvalidSignaturesObservedByObservers++
+			}
+			if obMetrics.FailedByInvalidEvaluation > 0 {
+				scMetrics.InvalidEvaluationObservedByObservers++
+			}
+		}
+	}
+
+	return scMetrics, nil
+}
+
+// InsertBroadcastMessage inserts a broadcast storage challenge message into the DB
+func (s *SQLiteStore) InsertBroadcastMessage(challenge types.BroadcastLogMessage) error {
+	now := time.Now().UTC()
+	const insertQuery = "INSERT INTO broadcast_challenge_messages(id, challenge_id, data, challenger, recipient, observers, created_at, updated_at) VALUES(NULL,?,?,?,?,?,?,?);"
+	_, err := s.db.Exec(insertQuery, challenge.ChallengeID, challenge.Data, challenge.Challenger, challenge.Recipient, challenge.Observers, now, now)
+	if err != nil {
+		return err
+	}
+
+	return nil
+}
+
+// StorageChallengeMetrics retrieves all the metrics that need to be broadcast
+func (s *SQLiteStore) StorageChallengeMetrics(timestamp time.Time) ([]types.StorageChallengeLogMessage, error) {
+	const query = `
+	SELECT id, challenge_id, message_type, data, sender_id, created_at, updated_at
+	FROM storage_challenge_metrics
+	WHERE created_at > ?
+	`
+
+	rows, err := s.db.Query(query, timestamp)
+	if err != nil {
+		return nil, err
+	}
+	defer rows.Close()
+
+	var metrics []types.StorageChallengeLogMessage
+	for rows.Next() {
+		var m types.StorageChallengeLogMessage
+		err := rows.Scan(&m.ID, &m.ChallengeID, &m.MessageType, &m.Data, &m.Sender, &m.CreatedAt, &m.UpdatedAt)
+		if err != nil {
+			return nil, err
+		}
+		metrics = append(metrics, m)
+	}
+
+	return metrics, rows.Err()
+}
+
+// QueryStorageChallengeMessage retrieves the storage challenge message for the given challengeID and messageType
+func (s *SQLiteStore) QueryStorageChallengeMessage(challengeID string, messageType int) (challengeMessage types.StorageChallengeLogMessage, err error) {
+	const selectQuery = "SELECT * FROM storage_challenge_messages WHERE challenge_id=? AND message_type=?"
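+	// NOTE: SELECT * depends on the column order of storage_challenge_messages;
+	// keep the Scan call below in sync with the table schema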
+	err = s.db.QueryRow(selectQuery, challengeID, messageType).Scan(
+		&challengeMessage.ID, &challengeMessage.ChallengeID, &challengeMessage.MessageType, &challengeMessage.Data,
+		&challengeMessage.Sender, &challengeMessage.SenderSignature, &challengeMessage.CreatedAt, &challengeMessage.UpdatedAt)
+
+	if err != nil {
+		return challengeMessage, err
+	}
+
+	return challengeMessage, nil
+}
+
+// CleanupStorageChallenges removes all storage challenge messages stored in the DB
+func (s *SQLiteStore) CleanupStorageChallenges() (err error) {
+	const delQuery = "DELETE FROM storage_challenge_messages"
+	_, err = s.db.Exec(delQuery)
+	return err
+}
+
+// GetStorageChallengeMetricsByChallengeID retrieves all the metrics recorded against the given challenge ID
+func (s *SQLiteStore) GetStorageChallengeMetricsByChallengeID(challengeID string) ([]types.StorageChallengeLogMessage, error) {
+	const query = `
+	SELECT id, challenge_id, message_type, data, sender_id, created_at, updated_at
+	FROM storage_challenge_metrics
+	WHERE challenge_id = ?;`
+
+	rows, err := s.db.Query(query, challengeID)
+	if err != nil {
+		return nil, err
+	}
+	defer rows.Close()
+
+	var metrics []types.StorageChallengeLogMessage
+	for rows.Next() {
+		var m types.StorageChallengeLogMessage
+		err := rows.Scan(&m.ID, &m.ChallengeID, &m.MessageType, &m.Data, &m.Sender, &m.CreatedAt, &m.UpdatedAt)
+		if err != nil {
+			return nil, err
+		}
+		metrics = append(metrics, m)
+	}
+
+	return metrics, rows.Err()
+}
+
+// GetMetricsByChallengeIDAndMessageType retrieves all the metrics by challengeID and messageType
+func (s *SQLiteStore) GetMetricsByChallengeIDAndMessageType(challengeID string, messageType types.MessageType) ([]types.StorageChallengeLogMessage, error) {
+	const query = `
+	SELECT id, challenge_id, message_type, data, sender_id, created_at, updated_at
+	FROM storage_challenge_metrics
+	WHERE challenge_id = ?
+ AND message_type = ?;` + + rows, err := s.db.Query(query, challengeID, int(messageType)) + if err != nil { + return nil, err + } + defer rows.Close() + + var metrics []types.StorageChallengeLogMessage + for rows.Next() { + var m types.StorageChallengeLogMessage + err := rows.Scan(&m.ID, &m.ChallengeID, &m.MessageType, &m.Data, &m.Sender, &m.CreatedAt, &m.UpdatedAt) + if err != nil { + return nil, err + } + metrics = append(metrics, m) + } + + return metrics, rows.Err() +} + +func processObserverEvaluations(observersEvaluations []types.StorageChallengeLogMessage) map[string]ObserverEvaluationMetrics { + evaluationMap := make(map[string]ObserverEvaluationMetrics) + + for _, observerEvaluation := range observersEvaluations { + var oe types.MessageData + if err := json.Unmarshal(observerEvaluation.Data, &oe); err != nil { + continue + } + + oem, exists := evaluationMap[observerEvaluation.ChallengeID] + if !exists { + oem = ObserverEvaluationMetrics{} // Initialize if not exists + } + + if isObserverEvaluationVerified(oe.ObserverEvaluation) { + oem.ChallengesVerified++ + } else { + if !oe.ObserverEvaluation.IsChallengeTimestampOK || + !oe.ObserverEvaluation.IsProcessTimestampOK || + !oe.ObserverEvaluation.IsEvaluationTimestampOK { + oem.FailedByInvalidTimestamps++ + } + + if !oe.ObserverEvaluation.IsChallengerSignatureOK || + !oe.ObserverEvaluation.IsRecipientSignatureOK { + oem.FailedByInvalidSignatures++ + } + + if !oe.ObserverEvaluation.IsEvaluationResultOK { + oem.FailedByInvalidEvaluation++ + } + } + + evaluationMap[observerEvaluation.ChallengeID] = oem + } + + return evaluationMap +} + +func isObserverEvaluationVerified(observerEvaluation types.ObserverEvaluationData) bool { + if !observerEvaluation.IsEvaluationResultOK { + return false + } + + if !observerEvaluation.IsChallengerSignatureOK { + return false + } + + if !observerEvaluation.IsRecipientSignatureOK { + return false + } + + if !observerEvaluation.IsChallengeTimestampOK { + return false + } + + if !observerEvaluation.IsProcessTimestampOK { + return false + } + + if !observerEvaluation.IsEvaluationTimestampOK { + return false + } + + return true +} + +// GetDistinctChallengeIDsCountForScoreAggregation gets the count of distinct challenge ids for score aggregation +func (s *SQLiteStore) GetDistinctChallengeIDsCountForScoreAggregation(after, before time.Time) (int, error) { + query := ` + SELECT COUNT(DISTINCT challenge_id) + FROM storage_challenge_metrics + WHERE message_type = 4 AND created_at >= ? AND created_at < ? + ` + + var challengeIDsCount int + err := s.db.QueryRow(query, after, before).Scan(&challengeIDsCount) + if err != nil { + return 0, err + } + + return challengeIDsCount, nil +} + +// GetDistinctChallengeIDs retrieves the distinct challenge ids for score aggregation +func (s *SQLiteStore) GetDistinctChallengeIDs(after, before time.Time, batchNumber int) ([]string, error) { + offset := batchNumber * batchSizeForChallengeIDsRetrieval + + query := ` + SELECT DISTINCT challenge_id + FROM storage_challenge_metrics + WHERE message_type = 4 AND created_at >= ? AND created_at < ? + LIMIT ? OFFSET ? 
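+	-- message_type 4 = affirmation; one batch per call, batchSizeForChallengeIDsRetrieval rows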
+ ` + + rows, err := s.db.Query(query, after, before, batchSizeForChallengeIDsRetrieval, offset) + if err != nil { + return nil, err + } + defer rows.Close() + + var challengeIDs []string + for rows.Next() { + var challengeID string + if err := rows.Scan(&challengeID); err != nil { + return nil, err + } + challengeIDs = append(challengeIDs, challengeID) + } + + if err = rows.Err(); err != nil { + return nil, err + } + + return challengeIDs, nil +} + +// BatchInsertScoreAggregationChallenges inserts the batch of challenge ids for score aggregation +func (s *SQLiteStore) BatchInsertScoreAggregationChallenges(challengeIDs []string, isAggregated bool) error { + tx, err := s.db.Begin() + if err != nil { + return err + } + + stmt, err := tx.Prepare(` + INSERT OR IGNORE INTO sc_score_aggregation_queue + (challenge_id, is_aggregated, created_at, updated_at) + VALUES (?,?,?,?) + `) + if err != nil { + tx.Rollback() + return err + } + defer stmt.Close() + + for _, id := range challengeIDs { + now := time.Now().UTC() + + _, err = stmt.Exec(id, isAggregated, now, now) + if err != nil { + tx.Rollback() + return err + } + } + + // Commit the transaction + return tx.Commit() +} diff --git a/pkg/storage/queries/task_history.go b/pkg/storage/queries/task_history.go new file mode 100644 index 00000000..28a8572c --- /dev/null +++ b/pkg/storage/queries/task_history.go @@ -0,0 +1,70 @@ +package queries + +import ( + "fmt" + + "github.com/LumeraProtocol/supernode/pkg/log" + "github.com/LumeraProtocol/supernode/pkg/types" + + json "github.com/json-iterator/go" +) + +type TaskHistoryQueries interface { + InsertTaskHistory(history types.TaskHistory) (int, error) + QueryTaskHistory(taskID string) (history []types.TaskHistory, err error) +} + +// InsertTaskHistory inserts task history +func (s *SQLiteStore) InsertTaskHistory(history types.TaskHistory) (hID int, err error) { + var stringifyDetails string + if history.Details != nil { + stringifyDetails = history.Details.Stringify() + } + + const insertQuery = "INSERT INTO task_history(id, time, task_id, status, details) VALUES(NULL,?,?,?,?);" + res, err := s.db.Exec(insertQuery, history.CreatedAt, history.TaskID, history.Status, stringifyDetails) + + if err != nil { + return 0, err + } + + var id int64 + if id, err = res.LastInsertId(); err != nil { + return 0, err + } + + return int(id), nil +} + +// QueryTaskHistory gets task history by taskID +func (s *SQLiteStore) QueryTaskHistory(taskID string) (history []types.TaskHistory, err error) { + const selectQuery = "SELECT * FROM task_history WHERE task_id = ? 
LIMIT 100" + rows, err := s.db.Query(selectQuery, taskID) + if err != nil { + return nil, err + } + defer rows.Close() + + var data []types.TaskHistory + for rows.Next() { + i := types.TaskHistory{} + var details string + err = rows.Scan(&i.ID, &i.CreatedAt, &i.TaskID, &i.Status, &details) + if err != nil { + return nil, err + } + + if details != emptyString { + err = json.Unmarshal([]byte(details), &i.Details) + if err != nil { + log.Info(details) + log.WithError(err).Error(fmt.Sprintf("cannot unmarshal task history details: %s", details)) + i.Details = nil + } + } + + data = append(data, i) + } + + return data, nil +} diff --git a/pkg/storage/rqstore/store.go b/pkg/storage/rqstore/store.go index f6fe52f2..bc62a40a 100644 --- a/pkg/storage/rqstore/store.go +++ b/pkg/storage/rqstore/store.go @@ -41,7 +41,7 @@ type SymbolDir struct { func NewSQLiteRQStore(file string) (*SQLiteRQStore, error) { db, err := sqlx.Connect("sqlite3", file) if err != nil { - return nil, fmt.Errorf("cannot open rq-service database: %w", err) + return nil, fmt.Errorf("cannot open rq-services database: %w", err) } // Create the rq_symbols_dir table if it doesn't exist diff --git a/pkg/types/healthcheck_challenge.go b/pkg/types/healthcheck_challenge.go new file mode 100644 index 00000000..a171138b --- /dev/null +++ b/pkg/types/healthcheck_challenge.go @@ -0,0 +1,164 @@ +package types + +import ( + "encoding/json" + "time" + + "github.com/LumeraProtocol/supernode/pkg/utils" +) + +// HealthCheckMessageType represents the type of message sent in the health-check process +type HealthCheckMessageType int + +const ( + // HealthCheckChallengeMessageType represents the challenge message + HealthCheckChallengeMessageType HealthCheckMessageType = iota + 1 + // HealthCheckResponseMessageType represents the response message + HealthCheckResponseMessageType + // HealthCheckEvaluationMessageType represents the evaluation message + HealthCheckEvaluationMessageType + // HealthCheckAffirmationMessageType represents the affirmation message + HealthCheckAffirmationMessageType + // HealthCheckBroadcastMessageType represents the broadcast message + HealthCheckBroadcastMessageType +) + +// String returns the message string +func (hcm HealthCheckMessageType) String() string { + switch hcm { + case HealthCheckChallengeMessageType: + return "challenge" + case HealthCheckResponseMessageType: + return "response" + case HealthCheckEvaluationMessageType: + return "evaluation" + case HealthCheckAffirmationMessageType: + return "affirmation" + default: + return "unknown" + } +} + +// BroadcastHealthCheckMessage represents the healthcheck challenge message that needs to be broadcast after evaluation +type BroadcastHealthCheckMessage struct { + ChallengeID string + Challenger map[string][]byte + Recipient map[string][]byte + Observers map[string][]byte +} + +// BroadcastHealthCheckLogMessage represents the broadcast message log to be stored in the DB +type BroadcastHealthCheckLogMessage struct { + ChallengeID string `db:"challenge_id"` + Challenger string `db:"challenger"` + Recipient string `db:"recipient"` + Observers string `db:"observers"` + Data []byte `db:"data"` +} + +// HealthCheckChallengeData represents the data of challenge +type HealthCheckChallengeData struct { + Block int32 `json:"block"` + Merkelroot string `json:"merkelroot"` + Timestamp time.Time `json:"timestamp"` +} + +// HealthCheckResponseData represents the data of response +type HealthCheckResponseData struct { + Block int32 `json:"block"` + Merkelroot string 
`json:"merkelroot"` + Timestamp time.Time `json:"timestamp"` +} + +// HealthCheckEvaluationData represents the data of evaluation +type HealthCheckEvaluationData struct { + Block int32 `json:"block"` + Merkelroot string `json:"merkelroot"` + Timestamp time.Time `json:"timestamp"` + IsVerified bool `json:"is_verified"` +} + +// HealthCheckObserverEvaluationData represents the data of Observer's evaluation +type HealthCheckObserverEvaluationData struct { + Block int32 `json:"block"` + Merkelroot string `json:"merkelroot"` + IsChallengeTimestampOK bool `json:"is_challenge_timestamp_ok"` + IsProcessTimestampOK bool `json:"is_process_timestamp_ok"` + IsEvaluationTimestampOK bool `json:"is_evaluation_timestamp_ok"` + IsRecipientSignatureOK bool `json:"is_recipient_signature_ok"` + IsChallengerSignatureOK bool `json:"is_challenger_signature_ok"` + IsEvaluationResultOK bool `json:"is_evaluation_result_ok"` + Timestamp time.Time `json:"timestamp"` +} + +// HealthCheckMessageData represents the health check challenge message data +type HealthCheckMessageData struct { + ChallengerID string `json:"challenger_id"` + Challenge HealthCheckChallengeData `json:"challenge"` + Observers []string `json:"observers"` + RecipientID string `json:"recipient_id"` + Response HealthCheckResponseData `json:"response"` + ChallengerEvaluation HealthCheckEvaluationData `json:"challenger_evaluation"` + ObserverEvaluation HealthCheckObserverEvaluationData `json:"observer_evaluation"` +} + +// HealthCheckMessage represents the healthcheck challenge message +type HealthCheckMessage struct { + MessageType HealthCheckMessageType `json:"message_type"` + ChallengeID string `json:"challenge_id"` + Data HealthCheckMessageData `json:"data"` + Sender string `json:"sender"` + SenderSignature []byte `json:"sender_signature"` + CreatedAt time.Time `json:"created_at"` + UpdatedAt time.Time `json:"updated_at"` +} + +// HealthCheckChallengeMetric represents the metric log to be stored in the DB +type HealthCheckChallengeMetric struct { + ID int `db:"id"` + MessageType int `db:"message_type"` + ChallengeID string `db:"challenge_id"` + Data []byte `db:"data"` + SenderID string `db:"sender_id"` +} + +// HealthCheckChallengeLogMessage represents the message log to be stored in the DB +type HealthCheckChallengeLogMessage struct { + ID int `db:"id"` + MessageType int `db:"message_type"` + ChallengeID string `db:"challenge_id"` + Data []byte `db:"data"` + Sender string `db:"sender_id"` + SenderSignature []byte `db:"sender_signature"` + CreatedAt time.Time `db:"created_at"` + UpdatedAt time.Time `db:"updated_at"` +} + +// BroadcastHealthCheckMessageMetrics is the struct for broadcast message metrics +type BroadcastHealthCheckMessageMetrics struct { + ID int `db:"id"` + ChallengeID string `db:"challenge_id"` + Challenger string `db:"challenger"` + Recipient string `db:"recipient"` + Observers string `db:"observers"` + Data []byte `db:"data"` + CreatedAt time.Time `db:"created_at"` + UpdatedAt time.Time `db:"updated_at"` +} + +// ProcessBroadcastHealthCheckChallengeMetricsRequest represents the request for broadcasting metrics +type ProcessBroadcastHealthCheckChallengeMetricsRequest struct { + Data []byte `json:"data"` + SenderID string `json:"sender_id"` +} + +// HealthCheckChallengeMessages represents an array of health-check message +type HealthCheckChallengeMessages []HealthCheckMessage + +// Hash returns the hash of the health-check-challenge challenge log data +func (mdl HealthCheckChallengeMessages) Hash() string { + data, _ := 
json.Marshal(mdl) + hash, _ := utils.Sha3256hash(data) + + return string(hash) +} diff --git a/pkg/types/self_healing.go b/pkg/types/self_healing.go new file mode 100644 index 00000000..0ba13081 --- /dev/null +++ b/pkg/types/self_healing.go @@ -0,0 +1,252 @@ +package types + +import ( + "database/sql" + "encoding/json" + "time" + + "github.com/LumeraProtocol/supernode/pkg/utils" +) + +// SelfHealingMessageType represents the type of message sent in the self-healing process +type SelfHealingMessageType int + +const ( + // SelfHealingChallengeMessage represents the challenge message + SelfHealingChallengeMessage SelfHealingMessageType = iota + 1 + // SelfHealingResponseMessage represents the response message + SelfHealingResponseMessage + // SelfHealingVerificationMessage represents the verification message + SelfHealingVerificationMessage + // SelfHealingCompletionMessage represents the challenge message processed successfully + SelfHealingCompletionMessage + // SelfHealingAcknowledgementMessage represents the acknowledgement message + SelfHealingAcknowledgementMessage +) + +func (s SelfHealingMessageType) String() string { + messages := [...]string{"", "challenge", "response", "verification", "completion", "acknowledgement"} + if s < 1 || int(s) >= len(messages) { + return "unknown" + } + + return messages[s] +} + +// TicketType represents the type of ticket; nft, cascade, sense +type TicketType int + +const ( + // TicketTypeCascade represents the cascade ticket type + TicketTypeCascade TicketType = iota + 1 + // TicketTypeSense represents the sense ticket type + TicketTypeSense + // TicketTypeNFT represents the NFT ticket type + TicketTypeNFT +) + +func (t TicketType) String() string { + tickets := [...]string{"", "cascade", "sense", "nft"} + if t < 1 || int(t) >= len(tickets) { + return "unknown" + } + + return tickets[t] +} + +// PingInfo represents the structure of data to be inserted into the ping_history table +type PingInfo struct { + ID int `db:"id"` + SupernodeID string `db:"supernode_id"` + IPAddress string `db:"ip_address"` + TotalPings int `db:"total_pings"` + TotalSuccessfulPings int `db:"total_successful_pings"` + AvgPingResponseTime float64 `db:"avg_ping_response_time"` + IsOnline bool `db:"is_online"` + IsOnWatchlist bool `db:"is_on_watchlist"` + IsAdjusted bool `db:"is_adjusted"` + CumulativeResponseTime float64 `db:"cumulative_response_time"` + CreatedAt time.Time `db:"created_at"` + UpdatedAt time.Time `db:"updated_at"` + LastSeen sql.NullTime `db:"last_seen"` + MetricsLastBroadcastAt sql.NullTime `db:"metrics_last_broadcast_at"` + HealthCheckMetricsLastBroadcastAt sql.NullTime `db:"health_check_metrics_last_broadcast_at"` + GenerationMetricsLastBroadcastAt sql.NullTime `db:"generation_metrics_last_broadcast_at"` + ExecutionMetricsLastBroadcastAt sql.NullTime `db:"execution_metrics_last_broadcast_at"` + SCScoreLastAggregatedAt sql.NullTime `db:"sc_score_last_aggregated_at"` + LastResponseTime float64 `db:"-"` +} + +// PingInfos represents array of ping info +type PingInfos []PingInfo + +// SelfHealingReports represents the self-healing metrics for each challenge +type SelfHealingReports map[string]SelfHealingReport + +// SelfHealingReport represents the self-healing challenges +type SelfHealingReport map[string]SelfHealingMessages + +// SelfHealingMessages represents the self-healing metrics for each challenge = message_type = 3 +type SelfHealingMessages []SelfHealingMessage + +// SelfHealingMessage represents the self-healing message +type SelfHealingMessage struct { + 
TriggerID string `json:"trigger_id"` + MessageType SelfHealingMessageType `json:"message_type"` + SelfHealingMessageData SelfHealingMessageData `json:"data"` + SenderID string `json:"sender_id"` + SenderSignature []byte `json:"sender_signature"` +} + +// SelfHealingMessageData represents the self-healing message data == message_type = 2 +type SelfHealingMessageData struct { + ChallengerID string `json:"challenger_id"` + RecipientID string `json:"recipient_id"` + Challenge SelfHealingChallengeData `json:"challenge"` + Response SelfHealingResponseData `json:"response"` + Verification SelfHealingVerificationData `json:"verification"` +} + +// SelfHealingChallengeData represents the challenge data for self-healing sent by the challenger +type SelfHealingChallengeData struct { + Block int32 `json:"block"` + Merkelroot string `json:"merkelroot"` + Timestamp time.Time `json:"timestamp"` + ChallengeTickets []ChallengeTicket `json:"challenge_tickets"` + NodesOnWatchlist string `json:"nodes_on_watchlist"` +} + +// ChallengeTicket represents the ticket details for self-healing challenge +type ChallengeTicket struct { + TxID string `json:"tx_id"` + TicketType TicketType `json:"ticket_type"` + MissingKeys []string `json:"missing_keys"` + DataHash []byte `json:"data_hash"` + Recipient string `json:"recipient"` +} + +// RespondedTicket represents the details of ticket responded in a self-healing challenge +type RespondedTicket struct { + TxID string `json:"tx_id"` + TicketType TicketType `json:"ticket_type"` + MissingKeys []string `json:"missing_keys"` + ReconstructedFileHash []byte `json:"reconstructed_file_hash"` + IsReconstructionRequired bool `json:"is_reconstruction_required"` + Error string `json:"error"` +} + +// SelfHealingResponseData represents the response data for self-healing sent by the recipient +type SelfHealingResponseData struct { + ChallengeID string `json:"challenge_id"` + Block int32 `json:"block"` + Merkelroot string `json:"merkelroot"` + Timestamp time.Time `json:"timestamp"` + RespondedTicket RespondedTicket `json:"responded_ticket"` + Verifiers []string `json:"verifiers"` +} + +// VerifiedTicket represents the details of ticket verified in self-healing challenge +type VerifiedTicket struct { + TxID string `json:"tx_id"` + TicketType TicketType `json:"ticket_type"` + MissingKeys []string `json:"missing_keys"` + ReconstructedFileHash []byte `json:"reconstructed_file_hash"` + IsReconstructionRequired bool `json:"is_reconstruction_required"` + IsReconstructionRequiredByHealer bool `json:"is_reconstruction_required_by_healer"` + IsVerified bool `json:"is_verified"` + Message string `json:"message"` +} + +// SelfHealingVerificationData represents the verification data for self-healing challenge +type SelfHealingVerificationData struct { + NodeID string `json:"node_id"` + NodeAddress string `json:"node_address"` + ChallengeID string `json:"challenge_id"` + Block int32 `json:"block"` + Merkelroot string `json:"merkelroot"` + Timestamp time.Time `json:"timestamp"` + VerifiedTicket VerifiedTicket `json:"verified_ticket"` + VerifiersData map[string][]byte `json:"verifiers_data"` +} + +// SelfHealingGenerationMetric represents the self-healing generation metrics for trigger events +type SelfHealingGenerationMetric struct { + ID int `db:"id"` + TriggerID string `db:"trigger_id"` + MessageType int `db:"message_type"` + Data []byte `db:"data"` + SenderID string `db:"sender_id"` + SenderSignature []byte `db:"sender_signature"` + CreatedAt time.Time `db:"created_at"` + UpdatedAt time.Time 
`db:"updated_at"` +} + +// CombinedSelfHealingMetrics represents the combination of generation and execution metrics +type CombinedSelfHealingMetrics struct { + GenerationMetrics []SelfHealingGenerationMetric + ExecutionMetrics []SelfHealingExecutionMetric +} + +// SelfHealingExecutionMetric represents the self-healing execution metrics for trigger events +type SelfHealingExecutionMetric struct { + ID int `db:"id"` + TriggerID string `db:"trigger_id"` + ChallengeID string `db:"challenge_id"` + MessageType int `db:"message_type"` + Data []byte `db:"data"` + SenderID string `db:"sender_id"` + SenderSignature []byte `db:"sender_signature"` + CreatedAt time.Time `db:"created_at"` + UpdatedAt time.Time `db:"updated_at"` +} + +// SelfHealingMetricType represents the type of self-healing metric +type SelfHealingMetricType int + +const ( + // GenerationSelfHealingMetricType represents the generation metric for self-healing + GenerationSelfHealingMetricType SelfHealingMetricType = 1 + // ExecutionSelfHealingMetricType represents the execution metric for self-healing + ExecutionSelfHealingMetricType SelfHealingMetricType = 2 +) + +// ProcessBroadcastMetricsRequest represents the request for broadcasting metrics +type ProcessBroadcastMetricsRequest struct { + Data []byte `json:"data"` + Type SelfHealingMetricType `json:"type"` + SenderID string `json:"sender_id"` + SenderSignature []byte `json:"sender_signature"` +} + +// SelfHealingMetrics represents the self-healing metrics for each challenge +type SelfHealingMetrics struct { + ChallengeID string `db:"challenge_id"` + SentTicketsForSelfHealing int `db:"sent_tickets_for_self_healing"` + EstimatedMissingKeys int `db:"estimated_missing_keys"` + TicketsInProgress int `db:"tickets_in_progress"` + TicketsRequiredSelfHealing int `db:"tickets_required_self_healing"` + SuccessfullySelfHealedTickets int `db:"successfully_self_healed_tickets"` + SuccessfullyVerifiedTickets int `db:"successfully_verified_tickets"` +} + +// SelfHealingChallengeEvent represents the challenge event that needs to be healed. 
+type SelfHealingChallengeEvent struct { + ID int64 + TriggerID string + ChallengeID string + TicketID string + Data []byte + SenderID string + IsProcessed bool + ExecMetric SelfHealingExecutionMetric + CreatedAt time.Time + UpdatedAt time.Time +} + +// Hash returns the hash of the self-healing challenge reports +func (s SelfHealingReports) Hash() string { + data, _ := json.Marshal(s) + hash, _ := utils.Sha3256hash(data) + + return string(hash) +} diff --git a/pkg/types/storage_challenge.go b/pkg/types/storage_challenge.go new file mode 100644 index 00000000..2b0432e1 --- /dev/null +++ b/pkg/types/storage_challenge.go @@ -0,0 +1,249 @@ +package types + +import ( + "encoding/json" + "time" + + "github.com/LumeraProtocol/supernode/pkg/errors" + "github.com/LumeraProtocol/supernode/pkg/utils" +) + +// MessageType represents the type of message +type MessageType int + +const ( + // ChallengeMessageType represents the challenge message + ChallengeMessageType MessageType = iota + 1 + // ResponseMessageType represents the response message + ResponseMessageType + // EvaluationMessageType represents the evaluation message + EvaluationMessageType + // AffirmationMessageType represents the affirmation message + AffirmationMessageType + //BroadcastMessageType represents the message that needs to be broadcast + BroadcastMessageType +) + +// String returns the message string +func (m MessageType) String() string { + switch m { + case ChallengeMessageType: + return "challenge" + case ResponseMessageType: + return "response" + case EvaluationMessageType: + return "evaluation" + case AffirmationMessageType: + return "affirmation" + default: + return "unknown" + } +} + +// MessageTypeFromString returns the message type from string +func MessageTypeFromString(str string) (MessageType, error) { + switch str { + case "challenge": + return ChallengeMessageType, nil + case "response": + return ResponseMessageType, nil + case "evaluation": + return EvaluationMessageType, nil + case "affirmation": + return AffirmationMessageType, nil + default: + return 0, errors.New("invalid message type string") + } +} + +// StorageChallengeSignatures represents the signature struct for broadcasting +type StorageChallengeSignatures struct { + Challenger map[string]string `json:"challenger,omitempty"` + Recipient map[string]string `json:"recipient,omitempty"` + Obs map[string]string `json:"obs,omitempty"` +} + +// Message represents the storage challenge message +type Message struct { + MessageType MessageType `json:"message_type"` + ChallengeID string `json:"challenge_id"` + Data MessageData `json:"data"` + Sender string `json:"sender"` + SenderSignature []byte `json:"sender_signature"` + CreatedAt time.Time `json:"created_at"` + UpdatedAt time.Time `json:"updated_at"` +} + +// BroadcastMessage represents the storage challenge message that needs to be broadcast after evaluation +type BroadcastMessage struct { + ChallengeID string + Challenger map[string][]byte + Recipient map[string][]byte + Observers map[string][]byte +} + +type MessageDataList []MessageData + +// MessageData represents the storage challenge message data +type MessageData struct { + ChallengerID string `json:"challenger_id"` + Challenge ChallengeData `json:"challenge"` + Observers []string `json:"observers"` + RecipientID string `json:"recipient_id"` + Response ResponseData `json:"response"` + ChallengerEvaluation EvaluationData `json:"challenger_evaluation"` + ObserverEvaluation ObserverEvaluationData `json:"observer_evaluation"` +} + +// ChallengeData 
represents the data of challenge +type ChallengeData struct { + Block int32 `json:"block"` + Merkelroot string `json:"merkelroot"` + Timestamp time.Time `json:"timestamp"` + FileHash string `json:"file_hash"` + StartIndex int `json:"start_index"` + EndIndex int `json:"end_index"` +} + +// ResponseData represents the data of response +type ResponseData struct { + Block int32 `json:"block"` + Merkelroot string `json:"merkelroot"` + Hash string `json:"hash"` + Timestamp time.Time `json:"timestamp"` +} + +// EvaluationData represents the data of evaluation +type EvaluationData struct { + Block int32 `json:"block"` + Merkelroot string `json:"merkelroot"` + Timestamp time.Time `json:"timestamp"` + Hash string `json:"hash"` + IsVerified bool `json:"is_verified"` +} + +// ObserverEvaluationData represents the data of Observer's evaluation +type ObserverEvaluationData struct { + Block int32 `json:"block"` + Merkelroot string `json:"merkelroot"` + IsChallengeTimestampOK bool `json:"is_challenge_timestamp_ok"` + IsProcessTimestampOK bool `json:"is_process_timestamp_ok"` + IsEvaluationTimestampOK bool `json:"is_evaluation_timestamp_ok"` + IsRecipientSignatureOK bool `json:"is_recipient_signature_ok"` + IsChallengerSignatureOK bool `json:"is_challenger_signature_ok"` + IsEvaluationResultOK bool `json:"is_evaluation_result_ok"` + Reason string `json:"reason"` + TrueHash string `json:"true_hash"` + Timestamp time.Time `json:"timestamp"` +} + +// StorageChallengeLogMessage represents the message log to be stored in the DB +type StorageChallengeLogMessage struct { + ID int `db:"id"` + MessageType int `db:"message_type"` + ChallengeID string `db:"challenge_id"` + Data []byte `db:"data"` + Sender string `db:"sender_id"` + SenderSignature []byte `db:"sender_signature"` + CreatedAt time.Time `db:"created_at"` + UpdatedAt time.Time `db:"updated_at"` +} + +// StorageChallengeMetric represents the metric log to be stored in the DB +type StorageChallengeMetric struct { + ID int `db:"id"` + MessageType int `db:"message_type"` + ChallengeID string `db:"challenge_id"` + Data []byte `db:"data"` + SenderID string `db:"sender_id"` +} + +// BroadcastLogMessage represents the broadcast message log to be stored in the DB +type BroadcastLogMessage struct { + ChallengeID string `db:"challenge_id"` + Challenger string `db:"challenger"` + Recipient string `db:"recipient"` + Observers string `db:"observers"` + Data []byte `db:"data"` +} + +// BroadcastMessageMetrics is the struct for broadcast message metrics +type BroadcastMessageMetrics struct { + ID int `db:"id"` + ChallengeID string `db:"challenge_id"` + Challenger string `db:"challenger"` + Recipient string `db:"recipient"` + Observers string `db:"observers"` + Data []byte `db:"data"` + CreatedAt time.Time `db:"created_at"` + UpdatedAt time.Time `db:"updated_at"` +} + +// ProcessBroadcastChallengeMetricsRequest represents the request for broadcasting metrics +type ProcessBroadcastChallengeMetricsRequest struct { + Data []byte `json:"data"` + SenderID string `json:"sender_id"` +} + +type StorageChallengeMessages []Message + +// Hash returns the hash of the storage-challenge challenge log data +func (mdl StorageChallengeMessages) Hash() string { + data, _ := json.Marshal(mdl) + hash, _ := utils.Sha3256hash(data) + + return string(hash) +} + +// NScMetric gets the latest challenge IDs from the DB +type NScMetric struct { + Count int + ChallengeID string + CreatedAt string +} + +// NHcMetric gets the latest health-check challenge IDs from the DB +type NHcMetric struct { + 
Count int + ChallengeID string + CreatedAt string +} + +type AccumulativeChallengeData struct { + NodeID string `db:"node_id"` + IPAddress string `db:"ip_address"` + TotalChallengesAsRecipients int `db:"total_challenges_as_recipients"` + TotalChallengesAsObservers int `db:"total_challenges_as_observers"` + TotalChallengesAsChallengers int `db:"total_challenges_as_challengers"` + CorrectChallengerEvaluations int `db:"correct_challenger_evaluations"` + CorrectObserverEvaluations int `db:"correct_observer_evaluations"` + CorrectRecipientEvaluations int `db:"correct_recipient_evaluations"` + CreatedAt time.Time `db:"created_at"` + UpdatedAt time.Time `db:"updated_at"` +} + +// AggregatedScore represents the structure of data in the aggregated_challenge_scores table +type AggregatedScore struct { + NodeID string + IPAddress string + StorageChallengeScore float64 + HealthCheckChallengeScore float64 + CreatedAt time.Time + UpdatedAt time.Time +} + +type AggregatedScoreList []AggregatedScore + +func (asl AggregatedScoreList) Hash() string { + data, _ := json.Marshal(asl) + hash, _ := utils.Sha3256hash(data) + + return string(hash) +} + +type ScoreAggregationEvent struct { + ChallengeID string `db:"challenge_id"` + IsAggregated bool `db:"is_aggregated"` + CreatedAt time.Time `db:"created_at"` + UpdatedAt time.Time `db:"updated_at"` +} diff --git a/pkg/types/ticket.go b/pkg/types/ticket.go new file mode 100644 index 00000000..9698e0d1 --- /dev/null +++ b/pkg/types/ticket.go @@ -0,0 +1,89 @@ +package types + +import ( + "time" +) + +type File struct { + FileID string + UploadTimestamp time.Time + Path string + FileIndex string + BaseFileID string + TaskID string + RegTxid string + ActivationTxid string + ReqBurnTxnAmount float64 + BurnTxnID string + ReqAmount float64 + IsConcluded bool + CascadeMetadataTicketID string + UUIDKey string + HashOfOriginalBigFile string + NameOfOriginalBigFileWithExt string + SizeOfOriginalBigFile float64 + DataTypeOfOriginalBigFile string + StartBlock int32 + DoneBlock int + PastelID string + Passphrase string +} + +type Files []*File + +func (f Files) Names() []string { + names := make([]string, 0, len(f)) + for _, file := range f { + names = append(names, file.FileID) + } + return names +} + +type RegistrationAttempt struct { + ID int + FileID string + BaseFileID string + RegStartedAt time.Time + ProcessorSNS string + FinishedAt time.Time + IsSuccessful bool + IsConfirmed bool + ErrorMessage string +} + +type ActivationAttempt struct { + ID int + FileID string + BaseFileID string + ActivationAttemptAt time.Time + IsSuccessful bool + IsConfirmed bool + ErrorMessage string +} + +func (fs Files) GetUnconcludedFiles() (Files, error) { + var unconcludedFiles Files + for _, f := range fs { + if !f.IsConcluded { + unconcludedFiles = append(unconcludedFiles, f) + } + } + + return unconcludedFiles, nil +} + +func (fs Files) GetBase() *File { + for _, f := range fs { + if f.FileIndex == "0" { + return f + } + } + + return nil +} + +type MultiVolCascadeTicketTxIDMap struct { + ID int64 + MultiVolCascadeTicketTxid string + BaseFileID string +} diff --git a/pkg/types/types.go b/pkg/types/types.go new file mode 100644 index 00000000..09238d2c --- /dev/null +++ b/pkg/types/types.go @@ -0,0 +1,142 @@ +package types + +import ( + "time" + + "github.com/LumeraProtocol/supernode/pkg/log" + + json "github.com/json-iterator/go" +) + +// MeshedSuperNode represents meshed sn +type MeshedSuperNode struct { + SessID string + NodeID string +} + +// NftRegMetadata represents nft reg metadata 
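+// (creator Pastel ID, block hash and height, timestamp, and optional group/collection IDs).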
+type NftRegMetadata struct {
+	CreatorPastelID string
+	BlockHash       string
+	BlockHeight     string
+	Timestamp       string
+	GroupID         string
+	CollectionTxID  string
+}
+
+// ActionRegMetadata represents action reg metadata
+type ActionRegMetadata struct {
+	CreatorPastelID string
+	BlockHash       string
+	BurnTxID        string
+	BlockHeight     string
+	Timestamp       string
+	EstimatedFee    int64
+	GroupID         string
+	CollectionTxID  string
+}
+
+// TaskHistory represents task history
+type TaskHistory struct {
+	ID        int
+	TaskID    string
+	CreatedAt time.Time
+	Status    string
+	Details   *Details
+}
+
+// StorageChallengeStatus represents possible storage challenge statuses
+type StorageChallengeStatus int
+
+const (
+	//UndefinedStorageChallengeStatus represents an invalid storage challenge status
+	UndefinedStorageChallengeStatus StorageChallengeStatus = iota
+	//GeneratedStorageChallengeStatus represents when the challenge is stored after generation
+	GeneratedStorageChallengeStatus
+	//ProcessedStorageChallengeStatus represents when the challenge is stored after processing
+	ProcessedStorageChallengeStatus
+	//VerifiedStorageChallengeStatus represents when the challenge is stored after verification
+	VerifiedStorageChallengeStatus
+)
+
+// StorageChallenge represents storage challenge log
+type StorageChallenge struct {
+	ID              int64
+	ChallengeID     string
+	FileHash        string
+	ChallengingNode string
+	RespondingNode  string
+	VerifyingNodes  string
+	GeneratedHash   string
+	Status          StorageChallengeStatus
+	StartingIndex   int
+	EndingIndex     int
+	CreatedAt       time.Time
+	UpdatedAt       time.Time
+}
+
+// SelfHealingStatus represents possible self-healing statuses of a failed challenge
+type SelfHealingStatus string
+
+const (
+	//UndefinedSelfHealingStatus represents an invalid status for a self-healing operation
+	UndefinedSelfHealingStatus SelfHealingStatus = "Undefined"
+	//CreatedSelfHealingStatus represents when the failed challenge gets stored in DB
+	CreatedSelfHealingStatus SelfHealingStatus = "Created"
+	//InProgressSelfHealingStatus represents when the challenge is retrieved for self-healing
+	InProgressSelfHealingStatus SelfHealingStatus = "InProgress"
+	//FailedSelfHealingStatus represents when the reconstruction has failed
+	FailedSelfHealingStatus SelfHealingStatus = "Failed"
+	//CompletedSelfHealingStatus represents when the reconstruction has been completed
+	CompletedSelfHealingStatus SelfHealingStatus = "Completed"
+	//ReconstructionNotRequiredSelfHealingStatus represents when reconstruction turns out not to be required
+	ReconstructionNotRequiredSelfHealingStatus SelfHealingStatus = "ReconstructionNotRequired"
+)
+
+// SelfHealingChallenge represents self-healing challenge
+type SelfHealingChallenge struct {
+	ID                    int64
+	ChallengeID           string
+	MerkleRoot            string
+	FileHash              string
+	ChallengingNode       string
+	RespondingNode        string
+	VerifyingNode         string
+	ReconstructedFileHash []byte
+	Status                SelfHealingStatus
+	CreatedAt             time.Time
+	UpdatedAt             time.Time
+}
+
+// Fields represents status log
+type Fields map[string]interface{}
+
+// Details represents status log details with additional fields
+type Details struct {
+	Message string
+	Fields  Fields
+}
+
+// Stringify converts the Details struct to a JSON string
+func (d *Details) Stringify() string {
+	details, err := json.Marshal(&d)
+	if err != nil {
+		log.WithError(err).Error("unable to marshal task history details")
+		return ""
+	}
+
+	return string(details)
+}
+
+// NewDetails initializes and returns a valid Details object
+func NewDetails(msg string, fields Fields) *Details {
+	return &Details{
+		Message: msg,
+		Fields:  fields,
+	}
+}
+
+// IsValid checks if the status log map is not empty
+func (f Fields) IsValid() bool {
+	return len(f) != 0
+}
diff --git a/proto/proto.go b/proto/proto.go
new file mode 100644
index 00000000..34045007
--- /dev/null
+++ b/proto/proto.go
@@ -0,0 +1,6 @@
+package proto
+
+const (
+	// MetadataKeySessID is the metadata key for the session ID, a value that is unique per registration process and shared across all of its connections.
+	MetadataKeySessID = "sessID"
+)
diff --git a/proto/supernode/action/cascade/service.proto b/proto/supernode/action/cascade/service.proto
new file mode 100644
index 00000000..3b3aa408
--- /dev/null
+++ b/proto/supernode/action/cascade/service.proto
@@ -0,0 +1,22 @@
+syntax = "proto3";
+
+package cascade;
+
+option go_package = "github.com/LumeraProtocol/supernode/gen/supernode/action/cascade";
+
+service CascadeService {
+  rpc UploadInputData (UploadInputDataRequest) returns (UploadInputDataResponse);
+}
+
+message UploadInputDataRequest {
+  string filename = 1;
+  string action_id = 2;
+  string data_hash = 3;
+  int32 rq_ic = 4;
+  int32 rq_max = 5;
+}
+
+message UploadInputDataResponse {
+  bool success = 1;
+  string message = 2;
+}
diff --git a/proto/supernode/supernode/cascade_service.proto b/proto/supernode/supernode/cascade_service.proto
new file mode 100644
index 00000000..ff88c41c
--- /dev/null
+++ b/proto/supernode/supernode/cascade_service.proto
@@ -0,0 +1,38 @@
+syntax = "proto3";
+
+option go_package = "github.com/LumeraProtocol/supernode/gen/supernode/supernode";
+
+package supernode;
+
+// this proto defines the gRPC methods that an SN uses to talk to other SNs during Cascade Registration
+
+service CascadeService {
+  // Session informs the primary supernode of the caller's `nodeID` and the `sessID` it wants to connect to.
+  // The stream is used by the parties to inform each other about the cancellation of the task.
+  rpc Session(stream SessionRequest) returns (stream SessionReply);
+
+  // SendCascadeTicketSignature sends the signature from the secondary supernodes (mn2/mn3) for the given registration session ID to the primary supernode
+  rpc SendCascadeTicketSignature(SendTicketSignatureRequest) returns (SendTicketSignatureReply);
+}
+
+message SessionRequest {
+  string nodeID = 1;
+}
+
+message SessionReply {
+  string sessID = 1;
+}
+
+message SendTicketSignatureRequest {
+  string nodeID = 1;
+  bytes signature = 2;
+  bytes data = 3;
+  bytes rqFile = 4;
+  EncoderParameters rqEncodeParams = 5;
+}
+
+message SendTicketSignatureReply {}
+
+message EncoderParameters {
+  bytes Oti = 1;
+}
diff --git a/supernode/node/action/server/cascade/cascade_action_server.go b/supernode/node/action/server/cascade/cascade_action_server.go
new file mode 100644
index 00000000..751cec60
--- /dev/null
+++ b/supernode/node/action/server/cascade/cascade_action_server.go
@@ -0,0 +1,20 @@
+package cascade
+
+import (
+	cascadeGen "github.com/LumeraProtocol/supernode/gen/supernode/action/cascade"
+	"github.com/LumeraProtocol/supernode/supernode/node/common"
+	"github.com/LumeraProtocol/supernode/supernode/services/cascade"
+)
+
+type CascadeActionServer struct {
+	cascadeGen.UnimplementedCascadeServiceServer
+
+	*common.RegisterCascade
+}
+
+// NewCascadeActionServer returns a new CascadeActionServer instance.
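+//
+// A minimal wiring sketch (illustrative only; the gRPC server setup below is an
+// assumption, not part of this file; svc is a *cascade.CascadeService):
+//
+//	srv := grpc.NewServer()
+//	cascadeGen.RegisterCascadeServiceServer(srv, NewCascadeActionServer(svc))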
+func NewCascadeActionServer(service *cascade.CascadeService) *CascadeActionServer {
+	return &CascadeActionServer{
+		RegisterCascade: common.NewRegisterCascade(service),
+	}
+}
diff --git a/supernode/node/action/server/cascade/upload_cascade_action_input.go b/supernode/node/action/server/cascade/upload_cascade_action_input.go
new file mode 100644
index 00000000..df6a1eef
--- /dev/null
+++ b/supernode/node/action/server/cascade/upload_cascade_action_input.go
@@ -0,0 +1,39 @@
+package cascade
+
+import (
+	"context"
+	"fmt"
+
+	pb "github.com/LumeraProtocol/supernode/gen/supernode/action/cascade"
+	"github.com/LumeraProtocol/supernode/pkg/logtrace"
+	cascadeService "github.com/LumeraProtocol/supernode/supernode/services/cascade"
+)
+
+func (s *CascadeActionServer) UploadInputData(ctx context.Context, req *pb.UploadInputDataRequest) (*pb.UploadInputDataResponse, error) {
+	fields := logtrace.Fields{
+		logtrace.FieldMethod:  "UploadInputData",
+		logtrace.FieldModule:  "CascadeActionServer",
+		logtrace.FieldRequest: req,
+	}
+	logtrace.Info(ctx, "request to upload cascade input data received", fields)
+
+	task, err := s.TaskFromMD(ctx)
+	if err != nil {
+		return nil, err
+	}
+
+	res, err := task.UploadInputData(ctx, &cascadeService.UploadInputDataRequest{
+		Filename: req.Filename,
+		ActionID: req.ActionId,
+		DataHash: req.DataHash,
+		RqIc:     req.RqIc,
+		RqMax:    req.RqMax,
+	})
+	if err != nil {
+		fields[logtrace.FieldError] = err.Error()
+		logtrace.Error(ctx, "failed to upload input data", fields)
+		return &pb.UploadInputDataResponse{}, fmt.Errorf("cascade services upload input data error: %w", err)
+	}
+
+	return &pb.UploadInputDataResponse{Success: res.Success, Message: res.Message}, nil
+}
diff --git a/supernode/node/common/register_cascade.go b/supernode/node/common/register_cascade.go
new file mode 100644
index 00000000..9f4ef70f
--- /dev/null
+++ b/supernode/node/common/register_cascade.go
@@ -0,0 +1,51 @@
+package common
+
+import (
+	"context"
+
+	"google.golang.org/grpc/metadata"
+
+	"github.com/LumeraProtocol/supernode/pkg/errors"
+	"github.com/LumeraProtocol/supernode/proto"
+	"github.com/LumeraProtocol/supernode/supernode/services/cascade"
+)
+
+// RegisterCascade represents common grpc services for cascade registration.
+type RegisterCascade struct {
+	*cascade.CascadeService
+}
+
+// SessID retrieves SessID from the metadata.
+func (service *RegisterCascade) SessID(ctx context.Context) (string, bool) {
+	md, ok := metadata.FromIncomingContext(ctx)
+	if !ok {
+		return "", false
+	}
+
+	mdVals := md.Get(proto.MetadataKeySessID)
+	if len(mdVals) == 0 {
+		return "", false
+	}
+	return mdVals[0], true
+}
+
+// TaskFromMD returns task by SessID from the metadata.
+func (service *RegisterCascade) TaskFromMD(ctx context.Context) (*cascade.CascadeRegistrationTask, error) {
+	sessID, ok := service.SessID(ctx)
+	if !ok {
+		return nil, errors.New("sessID not found in metadata")
+	}
+
+	task := service.Task(sessID)
+	if task == nil {
+		return nil, errors.Errorf("task %q not found", sessID)
+	}
+	return task, nil
+}
+
+// NewRegisterCascade returns a new RegisterCascade instance.
+func NewRegisterCascade(service *cascade.CascadeService) *RegisterCascade {
+	return &RegisterCascade{
+		CascadeService: service,
+	}
+}
diff --git a/supernode/node/supernode/client/cascade_supernode_client.go b/supernode/node/supernode/client/cascade_supernode_client.go
new file mode 100644
index 00000000..247143ca
--- /dev/null
+++ b/supernode/node/supernode/client/cascade_supernode_client.go
@@ -0,0 +1,73 @@
+package client
+
+import (
+	"context"
+	"io"
+
+	"google.golang.org/grpc/codes"
+	"google.golang.org/grpc/status"
+
+	pb "github.com/LumeraProtocol/supernode/gen/supernode/supernode"
+	"github.com/LumeraProtocol/supernode/pkg/errors"
+	"github.com/LumeraProtocol/supernode/pkg/log"
+	node "github.com/LumeraProtocol/supernode/supernode/node/supernode"
+)
+
+type SupernodeCascadeActionClient struct {
+	sessID string
+
+	conn   *clientConn
+	client pb.CascadeServiceClient
+}
+
+func (service *SupernodeCascadeActionClient) SessID() string {
+	return service.sessID
+}
+
+func (service *SupernodeCascadeActionClient) Session(ctx context.Context, nodeID, sessID string) error {
+	service.sessID = sessID
+
+	stream, err := service.client.Session(ctx)
+	if err != nil {
+		return errors.Errorf("open Session stream: %w", err)
+	}
+
+	req := &pb.SessionRequest{
+		NodeID: nodeID,
+	}
+
+	if err := stream.Send(req); err != nil {
+		return errors.Errorf("send Session request: %w", err)
+	}
+
+	resp, err := stream.Recv()
+	if err != nil {
+		if err == io.EOF {
+			return nil
+		}
+		switch status.Code(err) {
+		case codes.Canceled, codes.Unavailable:
+			return nil
+		}
+		return errors.Errorf("receive Session response: %w", err)
+	}
+	log.WithContext(ctx).WithField("resp", resp).Debug("Session response")
+
+	// keep draining the stream in the background, closing the connection once the peer goes away
+	go func() {
+		defer service.conn.Close()
+		for {
+			if _, err := stream.Recv(); err != nil {
+				return
+			}
+		}
+	}()
+
+	return nil
+}
+
+func newSupernodeCascadeActionClient(conn *clientConn) node.RegisterCascadeInterface {
+	return &SupernodeCascadeActionClient{
+		conn:   conn,
+		client: pb.NewCascadeServiceClient(conn),
+	}
+}
diff --git a/supernode/node/supernode/client/client.go b/supernode/node/supernode/client/client.go
new file mode 100644
index 00000000..5d24e8b8
--- /dev/null
+++ b/supernode/node/supernode/client/client.go
@@ -0,0 +1,50 @@
+package client
+
+import (
+	"context"
+	"time"
+
+	"github.com/LumeraProtocol/supernode/pkg/errors"
+	"github.com/LumeraProtocol/supernode/pkg/log"
+	netgrpcclient "github.com/LumeraProtocol/supernode/pkg/net/grpc/client"
+	"github.com/LumeraProtocol/supernode/pkg/random"
+	node "github.com/LumeraProtocol/supernode/supernode/node/supernode"
+	"github.com/cosmos/cosmos-sdk/crypto/keyring"
+	_ "google.golang.org/grpc/keepalive"
+)
+
+// this implements SN's gRPC methods that call another SN during Cascade Registration,
+// i.e. these methods implement the client side of SN-to-SN gRPC communication
+
+type client struct {
+	*netgrpcclient.Client
+	keyRing             keyring.Keyring
+	superNodeAccAddress string
+}
+
+// Connect implements node.ClientInterface.Connect()
+func (c *client) Connect(ctx context.Context, address string) (node.ConnectionInterface, error) {
+	clientOptions := netgrpcclient.DefaultClientOptions()
+	clientOptions.ConnWaitTime = 30 * time.Minute
+	clientOptions.MinConnectTimeout = 30 * time.Minute
+	clientOptions.EnableRetries = false
+
+	id, _ := random.String(8, random.Base62Chars)
+
+	grpcConn, err := c.Client.Connect(ctx, address, clientOptions)
+	if err != nil {
+		log.WithContext(ctx).WithError(err).Error("DialContext err")
+		return nil, errors.Errorf("dial address %s: %w", address, err)
address %s: %w", address, err) + } + + log.WithContext(ctx).Debugf("Connected to %s", address) + + conn := newClientConn(id, grpcConn) + + go func() { + //<-conn.Done() + log.WithContext(ctx).Debugf("Disconnected %s", grpcConn.Target()) + }() + + return conn, nil +} diff --git a/supernode/node/supernode/client/connection.go b/supernode/node/supernode/client/connection.go new file mode 100644 index 00000000..51471f99 --- /dev/null +++ b/supernode/node/supernode/client/connection.go @@ -0,0 +1,26 @@ +package client + +import ( + "google.golang.org/grpc" + + "github.com/LumeraProtocol/supernode/supernode/node/supernode" +) + +// clientConn represents grpc client connection. +type clientConn struct { + *grpc.ClientConn + + id string +} + +// RegisterCascade implements node.ConnectionInterface.RegisterSense() +func (conn *clientConn) RegisterCascade() supernode.RegisterCascadeInterface { + return newSupernodeCascadeActionClient(conn) +} + +func newClientConn(id string, conn *grpc.ClientConn) supernode.ConnectionInterface { + return &clientConn{ + ClientConn: conn, + id: id, + } +} diff --git a/supernode/node/supernode/client/send_cascade_ticket_signature.go b/supernode/node/supernode/client/send_cascade_ticket_signature.go new file mode 100644 index 00000000..ef247d30 --- /dev/null +++ b/supernode/node/supernode/client/send_cascade_ticket_signature.go @@ -0,0 +1,25 @@ +package client + +import ( + "context" + + pb "github.com/LumeraProtocol/supernode/gen/supernode/supernode" + "github.com/LumeraProtocol/supernode/pkg/raptorq" +) + +// SendCascadeTicketSignature implements SendCascadeTicketSignature +func (service *SupernodeCascadeActionClient) SendCascadeTicketSignature(ctx context.Context, nodeID string, signature []byte, data []byte, rqFile []byte, rqEncodeParams raptorq.EncoderParameters) error { + ctx = contextWithMDSessID(ctx, service.sessID) + + _, err := service.client.SendCascadeTicketSignature(ctx, &pb.SendTicketSignatureRequest{ + NodeID: nodeID, + Signature: signature, + Data: data, + RqFile: rqFile, + RqEncodeParams: &pb.EncoderParameters{ + Oti: rqEncodeParams.Oti, + }, + }) + + return err +} diff --git a/supernode/node/supernode/client/session.go b/supernode/node/supernode/client/session.go new file mode 100644 index 00000000..3dde0bbf --- /dev/null +++ b/supernode/node/supernode/client/session.go @@ -0,0 +1,13 @@ +package client + +import ( + "context" + + "github.com/LumeraProtocol/supernode/proto" + "google.golang.org/grpc/metadata" +) + +func contextWithMDSessID(ctx context.Context, sessID string) context.Context { + md := metadata.Pairs(proto.MetadataKeySessID, sessID) + return metadata.NewOutgoingContext(ctx, md) +} diff --git a/supernode/node/supernode/node_client_interface.go b/supernode/node/supernode/node_client_interface.go new file mode 100644 index 00000000..c7a64fe8 --- /dev/null +++ b/supernode/node/supernode/node_client_interface.go @@ -0,0 +1,44 @@ +package supernode + +import ( + "context" + + "github.com/LumeraProtocol/supernode/pkg/raptorq" +) + +// ClientInterface represents a base connection interface. +type ClientInterface interface { + // Connect connects to the server at the given address. + Connect(ctx context.Context, address string) (ConnectionInterface, error) +} + +// ConnectionInterface represents a client connection +type ConnectionInterface interface { + // Close closes connection. 
+	Close() error
+	// RegisterCascade returns a new RegisterCascade stream
+	RegisterCascade() RegisterCascadeInterface
+}
+
+// SuperNodePeerAPIInterface base interface for other Node API interfaces
+type SuperNodePeerAPIInterface interface {
+	// SessID returns the taskID received from the server during the handshake.
+	SessID() (taskID string)
+	// Session sets up an initial connection with the primary supernode, telling it the sessID and the caller's own nodeID.
+	Session(ctx context.Context, nodeID, sessID string) (err error)
+}
+
+// revive:disable:exported
+
+// NodeMaker interface to make concrete node types
+type NodeMaker interface {
+	MakeNode(conn ConnectionInterface) SuperNodePeerAPIInterface
+}
+
+// RegisterCascadeInterface represents an interaction stream with supernodes for registering cascade tickets.
+type RegisterCascadeInterface interface {
+	SuperNodePeerAPIInterface
+
+	// SendCascadeTicketSignature sends the signature of the ticket to the primary supernode
+	SendCascadeTicketSignature(ctx context.Context, nodeID string, signature []byte, data []byte, rqFile []byte, rqEncodeParams raptorq.EncoderParameters) error
+}
diff --git a/supernode/node/supernode/server/cascade/cascade_supernode_server.go b/supernode/node/supernode/server/cascade/cascade_supernode_server.go
new file mode 100644
index 00000000..0a496b1f
--- /dev/null
+++ b/supernode/node/supernode/server/cascade/cascade_supernode_server.go
@@ -0,0 +1,24 @@
+package cascade
+
+import (
+	pb "github.com/LumeraProtocol/supernode/gen/supernode/supernode"
+	"github.com/LumeraProtocol/supernode/supernode/node/common"
+	"github.com/LumeraProtocol/supernode/supernode/services/cascade"
+)
+
+// this implements SN's gRPC methods that are called by other SNs during Cascade Registration,
+// i.e. these methods implement the server side of SN-to-SN gRPC communication
+
+// RegisterCascade represents grpc services for registering Cascade tickets.
+type RegisterCascade struct {
+	pb.UnimplementedCascadeServiceServer
+
+	*common.RegisterCascade
+}
+
+// NewRegisterCascade returns a new RegisterCascade instance.
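+// An illustrative registration sketch (the surrounding server setup is an assumption,
+// not shown in this change):
+//
+//	pb.RegisterCascadeServiceServer(srv, NewRegisterCascade(svc))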
+func NewRegisterCascade(service *cascade.CascadeService) *RegisterCascade {
+	return &RegisterCascade{
+		RegisterCascade: common.NewRegisterCascade(service),
+	}
+}
diff --git a/supernode/node/supernode/server/cascade/send_cascade_ticket_signature.go b/supernode/node/supernode/server/cascade/send_cascade_ticket_signature.go
new file mode 100644
index 00000000..04aac103
--- /dev/null
+++ b/supernode/node/supernode/server/cascade/send_cascade_ticket_signature.go
@@ -0,0 +1,28 @@
+package cascade
+
+import (
+	"context"
+
+	pb "github.com/LumeraProtocol/supernode/gen/supernode/supernode"
+	"github.com/LumeraProtocol/supernode/pkg/errors"
+	"github.com/LumeraProtocol/supernode/pkg/log"
+	sc "github.com/LumeraProtocol/supernode/supernode/services/common"
+)
+
+// SendCascadeTicketSignature implements supernode.CascadeServiceServer.SendCascadeTicketSignature()
+func (service *RegisterCascade) SendCascadeTicketSignature(ctx context.Context, req *pb.SendTicketSignatureRequest) (*pb.SendTicketSignatureReply, error) {
+	log.WithContext(ctx).WithField("req", req).Debugf("SendCascadeTicketSignature request")
+	task, err := service.TaskFromMD(ctx)
+	if err != nil {
+		return nil, err
+	}
+
+	// TODO : add rq file to req, also confirm which func to call
+	if err := task.ValidateSignedTicketFromSecondaryNode(ctx, req.Data, req.NodeID, req.Signature, req.RqFile); err != nil {
+		return nil, errors.Errorf("validate signed ticket %w", err)
+	}
+
+	if err := task.AddPeerTicketSignature(req.NodeID, req.Signature, sc.StatusAssetUploaded); err != nil {
+		return nil, errors.Errorf("add peer signature %w", err)
+	}
+
+	return &pb.SendTicketSignatureReply{}, nil
+}
diff --git a/supernode/node/supernode/server/cascade/session.go b/supernode/node/supernode/server/cascade/session.go
new file mode 100644
index 00000000..ed8670a4
--- /dev/null
+++ b/supernode/node/supernode/server/cascade/session.go
@@ -0,0 +1,81 @@
+package cascade
+
+import (
+	"context"
+	"io"
+
+	pb "github.com/LumeraProtocol/supernode/gen/supernode/supernode"
+	"github.com/LumeraProtocol/supernode/pkg/errors"
+	"github.com/LumeraProtocol/supernode/pkg/log"
+	"github.com/LumeraProtocol/supernode/supernode/services/cascade"
+
+	"google.golang.org/grpc/codes"
+	"google.golang.org/grpc/peer"
+	"google.golang.org/grpc/status"
+)
+
+// Session implements supernode.CascadeServiceServer.Session()
+func (service *RegisterCascade) Session(stream pb.CascadeService_SessionServer) error {
+	ctx, cancel := context.WithCancel(stream.Context())
+	defer cancel()
+
+	var task *cascade.CascadeRegistrationTask
+	isTaskNew := false
+
+	if sessID, ok := service.SessID(ctx); ok {
+		if task = service.Task(sessID); task == nil {
+			return errors.Errorf("task %q not found", sessID)
+		}
+	} else {
+		task = service.NewCascadeRegistrationTask()
+		isTaskNew = true
+	}
+
+	go func() {
+		<-task.Done()
+		cancel()
+	}()
+
+	if isTaskNew {
+		defer task.Cancel()
+	}
+
+	peer, _ := peer.FromContext(ctx)
+	log.WithContext(ctx).WithField("addr", peer.Addr).Debugf("Session stream")
+	defer log.WithContext(ctx).WithField("addr", peer.Addr).Debugf("Session stream closed")
+
+	req, err := stream.Recv()
+	if err != nil {
+		return errors.Errorf("receive handshake request: %w", err)
+	}
+	log.WithContext(ctx).WithField("req", req).Debugf("Session request")
+
+	if err := task.NetworkHandler.SessionNode(ctx, req.NodeID); err != nil {
+		return err
+	}
+
+	if !isTaskNew {
+		defer task.Cancel()
+	}
+
+	resp := &pb.SessionReply{
+		SessID: task.ID(),
+	}
+	if err := stream.Send(resp); err != nil {
+		return errors.Errorf("send handshake response: %w", err)
+	}
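+	// After the handshake reply, the stream is only read; the loop below exists
+	// solely to detect peer cancellation or disconnection.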
diff --git a/supernode/node/supernode/server/common/register_cascade.go b/supernode/node/supernode/server/common/register_cascade.go
new file mode 100644
index 00000000..08cdc0ad
--- /dev/null
+++ b/supernode/node/supernode/server/common/register_cascade.go
@@ -0,0 +1,56 @@
+package common
+
+import (
+	"context"
+
+	"google.golang.org/grpc"
+	"google.golang.org/grpc/metadata"
+
+	"github.com/LumeraProtocol/supernode/pkg/errors"
+	"github.com/LumeraProtocol/supernode/proto"
+	"github.com/LumeraProtocol/supernode/supernode/services/cascade"
+)
+
+// RegisterCascade represents common grpc services for cascade registration.
+type RegisterCascade struct {
+	*cascade.CascadeService
+}
+
+// SessID retrieves SessID from the metadata.
+func (service *RegisterCascade) SessID(ctx context.Context) (string, bool) {
+	md, ok := metadata.FromIncomingContext(ctx)
+	if !ok {
+		return "", false
+	}
+
+	mdVals := md.Get(proto.MetadataKeySessID)
+	if len(mdVals) == 0 {
+		return "", false
+	}
+	return mdVals[0], true
+}
+
+// TaskFromMD returns the task identified by the SessID from the metadata.
+func (service *RegisterCascade) TaskFromMD(ctx context.Context) (*cascade.CascadeRegistrationTask, error) {
+	sessID, ok := service.SessID(ctx)
+	if !ok {
+		return nil, errors.New("sessID not found in metadata")
+	}
+
+	task := service.Task(sessID)
+	if task == nil {
+		return nil, errors.Errorf("task %q not found", sessID)
+	}
+	return task, nil
+}
+
+func (service *RegisterCascade) Desc() *grpc.ServiceDesc {
+	return &grpc.ServiceDesc{ServiceName: "supernode.RegisterCascade", HandlerType: (*RegisterCascade)(nil)}
+}
+
+// NewRegisterCascade returns a new RegisterCascade instance.
+func NewRegisterCascade(service *cascade.CascadeService) *RegisterCascade {
+	return &RegisterCascade{
+		CascadeService: service,
+	}
+}
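TaskFromMD expects the session ID to arrive as incoming gRPC metadata under proto.MetadataKeySessID, so the calling side has to attach it to the outgoing context before issuing any per-task RPC. A minimal sketch:

	md := metadata.Pairs(proto.MetadataKeySessID, sessID)
	ctx = metadata.NewOutgoingContext(ctx, md)
	// any RPC issued with this ctx can now be routed to its task via TaskFromMD
	reply, err := client.SendCascadeTicketSignature(ctx, req)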
diff --git a/supernode/node/supernode/server/config.go b/supernode/node/supernode/server/config.go
new file mode 100644
index 00000000..722857e0
--- /dev/null
+++ b/supernode/node/supernode/server/config.go
@@ -0,0 +1,20 @@
+package server
+
+const (
+	defaultListenAddresses = "0.0.0.0"
+	defaultPort            = 4444
+)
+
+// Config contains settings of the supernode server.
+type Config struct {
+	ListenAddresses string `mapstructure:"listen_addresses" json:"listen_addresses,omitempty"`
+	Port            int    `mapstructure:"port" json:"port,omitempty"`
+}
+
+// NewConfig returns a new Config instance.
+func NewConfig() *Config {
+	return &Config{
+		ListenAddresses: defaultListenAddresses,
+		Port:            defaultPort,
+	}
+}
diff --git a/supernode/node/supernode/server/server.go b/supernode/node/supernode/server/server.go
new file mode 100644
index 00000000..4ea26161
--- /dev/null
+++ b/supernode/node/supernode/server/server.go
@@ -0,0 +1,145 @@
+package server
+
+import (
+	"context"
+	"fmt"
+	"net"
+	"strconv"
+	"strings"
+
+	"google.golang.org/grpc"
+	"google.golang.org/grpc/grpclog"
+
+	"github.com/LumeraProtocol/supernode/pkg/errgroup"
+	"github.com/LumeraProtocol/supernode/pkg/errors"
+	"github.com/LumeraProtocol/supernode/pkg/log"
+)
+
+type service interface {
+	Desc() *grpc.ServiceDesc
+}
+
+// Server represents supernode server
+type Server struct {
+	config   *Config
+	services []service
+	name     string
+	//secClient alts.SecClient
+	//secInfo   *alts.SecInfo
+}
+
+// Run starts the server
+func (server *Server) Run(ctx context.Context) error {
+	grpclog.SetLoggerV2(log.NewLoggerWithErrorLevel())
+	ctx = log.ContextWithPrefix(ctx, server.name)
+
+	group, ctx := errgroup.WithContext(ctx)
+
+	addresses := strings.Split(server.config.ListenAddresses, ",")
+	grpcServer := server.grpcServer(ctx)
+	if grpcServer == nil {
+		return fmt.Errorf("failed to initialize grpc server")
+	}
+
+	for _, address := range addresses {
+		addr := net.JoinHostPort(strings.TrimSpace(address), strconv.Itoa(server.config.Port))
+
+		group.Go(func() error {
+			return server.listen(ctx, addr, grpcServer)
+		})
+	}
+
+	return group.Wait()
+}
+
+func (server *Server) listen(ctx context.Context, address string, grpcServer *grpc.Server) (err error) {
+	listen, err := net.Listen("tcp", address)
+	if err != nil {
+		return errors.Errorf("listen: %w", err).WithField("address", address)
+	}
+
+	// The listener that will track connections.
+	/*listen = &connTrackListener{
+		Listener:  listen,
+		connTrack: server.connTrack, // connection tracker
+	}*/
+
+	errCh := make(chan error, 1)
+	go func() {
+		defer errors.Recover(func(recErr error) { err = recErr })
+		log.WithContext(ctx).Infof("gRPC server listening on %q", address)
+		if err := grpcServer.Serve(listen); err != nil {
+			errCh <- errors.Errorf("serve: %w", err).WithField("address", address)
+		}
+	}()
+
+	select {
+	case <-ctx.Done():
+		log.WithContext(ctx).Infof("Shutting down gRPC server at %q", address)
+		grpcServer.GracefulStop()
+	case err := <-errCh:
+		return err
+	}
+
+	return nil
+}
+
+func (server *Server) grpcServer(ctx context.Context) *grpc.Server {
+	//if server.secClient == nil || server.secInfo == nil {
+	//	log.WithContext(ctx).Errorln("secClient or secInfo don't initialize")
+	//	return nil
+	//}
+
+	//// Define the keep-alive parameters
+	//kaParams := keepalive.ServerParameters{
+	//	MaxConnectionIdle:     2 * time.Hour,
+	//	MaxConnectionAge:      2 * time.Hour,
+	//	MaxConnectionAgeGrace: 1 * time.Hour,
+	//	Time:                  1 * time.Hour,
+	//	Timeout:               30 * time.Minute,
+	//}
+	//
+	//// Define the keep-alive enforcement policy
+	//kaPolicy := keepalive.EnforcementPolicy{
+	//	MinTime:             3 * time.Minute, // Minimum time a client should wait before sending keep-alive probes
+	//	PermitWithoutStream: true,            // Only allow pings when there are active streams
+	//}
+
+	var grpcServer *grpc.Server
+	//if os.Getenv("INTEGRATION_TEST_ENV") == "true" {
+	//	grpcServer = grpc.NewServer(middleware.UnaryInterceptor(), middleware.StreamInterceptor(), grpc.MaxSendMsgSize(100000000),
+	//		grpc.MaxRecvMsgSize(100000000), grpc.KeepaliveParams(kaParams), // Use the keep-alive parameters
+	//		grpc.KeepaliveEnforcementPolicy(kaPolicy))
+	//} else {
+	//
+	//	grpcServer = grpc.NewServer(middleware.UnaryInterceptor(), middleware.StreamInterceptor(),
+	//		middleware.AltsCredential(server.secClient, server.secInfo), grpc.MaxSendMsgSize(100000000),
+	//		grpc.MaxRecvMsgSize(100000000), grpc.KeepaliveParams(kaParams), // Use the keep-alive parameters
+	//		grpc.KeepaliveEnforcementPolicy(kaPolicy))
+	//}
+	// Plain gRPC server until the ALTS credentials and keep-alive options above
+	// are re-enabled; without this assignment grpcServer stays nil and
+	// RegisterService below panics.
+	grpcServer = grpc.NewServer()
+
+	for _, service := range server.services {
+		log.WithContext(ctx).Debugf("Registering service %q", service.Desc().ServiceName)
+		grpcServer.RegisterService(service.Desc(), service)
+	}
+
+	return grpcServer
+}
+
+// New returns a new Server instance.
+func New(config *Config, name string,
+	//secClient alts.SecClient,
+	//secInfo *alts.SecInfo,
+	services ...service) *Server {
+	return &Server{
+		config: config,
+		//secClient: secClient,
+		//secInfo:   secInfo,
+		services: services,
+		name:     name,
+	}
+}
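Putting this package together: build a Config, hand New every gRPC service (anything exposing Desc()), and Run blocks until the context is cancelled or a listener fails. A usage sketch, assuming `registerCascade` was built via common.NewRegisterCascade:

	cfg := server.NewConfig()                 // defaults to 0.0.0.0:4444
	cfg.ListenAddresses = "0.0.0.0,127.0.0.1" // comma-separated; one listener per address
	srv := server.New(cfg, "grpc-server", registerCascade)
	if err := srv.Run(ctx); err != nil {
		return err
	}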
diff --git a/supernode/services/cascade/cascade_reg_node.go b/supernode/services/cascade/cascade_reg_node.go
new file mode 100644
index 00000000..cacb4594
--- /dev/null
+++ b/supernode/services/cascade/cascade_reg_node.go
@@ -0,0 +1,20 @@
+package cascade
+
+import (
+	node "github.com/LumeraProtocol/supernode/supernode/node/supernode"
+)
+
+// RegisterCascadeNodeMaker makes concrete instance of CascadeRegistrationNode
+type RegisterCascadeNodeMaker struct {
+	node.NodeMaker
+}
+
+// MakeNode makes concrete instance of CascadeRegistrationNode
+func (maker RegisterCascadeNodeMaker) MakeNode(conn node.ConnectionInterface) node.SuperNodePeerAPIInterface {
+	return &CascadeRegistrationNode{RegisterCascadeInterface: conn.RegisterCascade()}
+}
+
+// CascadeRegistrationNode represents a supernode connection.
+type CascadeRegistrationNode struct {
+	node.RegisterCascadeInterface
+}
diff --git a/supernode/services/cascade/config.go b/supernode/services/cascade/config.go
new file mode 100644
index 00000000..caa145fb
--- /dev/null
+++ b/supernode/services/cascade/config.go
@@ -0,0 +1,27 @@
+package cascade
+
+import (
+	"github.com/LumeraProtocol/supernode/supernode/services/common"
+)
+
+const (
+	defaultNumberConnectedNodes       = 2
+	defaultPreburntTxMinConfirmations = 3
+)
+
+// Config contains settings for registering a Cascade action.
+type Config struct {
+	common.Config `mapstructure:",squash" json:"-"`
+
+	RaptorQServiceAddress string `mapstructure:"-" json:"-"`
+	RqFilesDir            string
+
+	NumberConnectedNodes int `mapstructure:"-" json:"number_connected_nodes,omitempty"`
+}
+
+// NewConfig returns a new Config instance.
+func NewConfig() *Config {
+	return &Config{
+		NumberConnectedNodes: defaultNumberConnectedNodes,
+	}
+}
diff --git a/supernode/services/cascade/service.go b/supernode/services/cascade/service.go
new file mode 100644
index 00000000..5a564458
--- /dev/null
+++ b/supernode/services/cascade/service.go
@@ -0,0 +1,68 @@
+package cascade
+
+import (
+	"github.com/LumeraProtocol/supernode/p2p"
+	"github.com/LumeraProtocol/supernode/pkg/lumera"
+	"github.com/LumeraProtocol/supernode/pkg/raptorq"
+	"github.com/LumeraProtocol/supernode/pkg/storage"
+	"github.com/LumeraProtocol/supernode/pkg/storage/queries"
+	"github.com/LumeraProtocol/supernode/pkg/storage/rqstore"
+	node "github.com/LumeraProtocol/supernode/supernode/node/supernode"
+	"github.com/LumeraProtocol/supernode/supernode/services/common"
+)
+
+type CascadeService struct {
+	*common.SuperNodeService
+	config *Config
+
+	lumeraClient  lumera.Client
+	raptorQ       raptorq.RaptorQ
+	raptorQClient raptorq.ClientInterface
+
+	nodeClient node.ClientInterface
+	rqstore    rqstore.Store
+	historyDB  queries.LocalStoreInterface
+}
+
+// NewCascadeRegistrationTask starts a new Cascade registration task and returns it.
+func (s *CascadeService) NewCascadeRegistrationTask() *CascadeRegistrationTask {
+	task := NewCascadeRegistrationTask(s)
+	s.Worker.AddTask(task)
+
+	return task
+}
+
+// Task returns the Cascade registration task with the given id.
+func (s *CascadeService) Task(id string) *CascadeRegistrationTask {
+	t := s.Worker.Task(id)
+	if t == nil {
+		return nil
+	}
+
+	return t.(*CascadeRegistrationTask)
+}
+
+// NewCascadeService returns a new CascadeService instance.
+func NewCascadeService(config *Config,
+	lumera lumera.Client,
+	fileStorage storage.FileStorageInterface,
+	nodeClient node.ClientInterface,
+	p2pClient p2p.Client,
+	rqC raptorq.RaptorQ,
+	rqClient raptorq.ClientInterface,
+	rqstore rqstore.Store,
+) *CascadeService {
+	return &CascadeService{
+		config:           config,
+		SuperNodeService: common.NewSuperNodeService(fileStorage, p2pClient),
+		lumeraClient:     lumera,
+		nodeClient:       nodeClient,
+		raptorQ:          rqC,
+		raptorQClient:    rqClient,
+		rqstore:          rqstore,
+	}
+}
+
+func (s *CascadeService) GetSNAddress() string {
+	return s.config.SupernodeAccountAddress // FIXME : verify
+}
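The constructor takes every external dependency explicitly, which keeps the service testable. A hedged wiring sketch (each dependency below is assumed to be constructed elsewhere):

	svc := cascade.NewCascadeService(
		cascade.NewConfig(), // defaults: 2 connected nodes
		lumeraClient,        // chain queries and signing
		fileStorage,
		nodeClient,
		p2pClient,
		rq, rqClient, rqStore,
	)
	task := svc.NewCascadeRegistrationTask() // registered with the worker immediately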
diff --git a/supernode/services/cascade/task.go b/supernode/services/cascade/task.go
new file mode 100644
index 00000000..ea3c7026
--- /dev/null
+++ b/supernode/services/cascade/task.go
@@ -0,0 +1,62 @@
+package cascade
+
+import (
+	"context"
+
+	"github.com/LumeraProtocol/supernode/pkg/raptorq"
+	"github.com/LumeraProtocol/supernode/pkg/storage/files"
+	"github.com/LumeraProtocol/supernode/supernode/services/common"
+)
+
+type RQInfo struct {
+	rqIDsIC          uint32
+	rqIDs            []string
+	rqIDEncodeParams raptorq.EncoderParameters
+
+	rqIDsFile []byte
+	rawRqFile []byte
+	rqIDFiles [][]byte
+}
+
+// CascadeRegistrationTask is the task of registering a new Cascade action.
+type CascadeRegistrationTask struct {
+	RQInfo
+	*CascadeService
+
+	*common.SuperNodeTask
+	*common.RegTaskHelper
+	storage *common.StorageHandler
+
+	Asset          *files.File // TODO : remove
+	assetSizeBytes int
+	dataHash       string
+
+	creatorSignature []byte
+}
+
+const (
+	logPrefix = "cascade"
+)
+
+// Run starts the task
+func (task *CascadeRegistrationTask) Run(ctx context.Context) error {
+	return task.RunHelper(ctx, task.removeArtifacts)
+}
+
+func (task *CascadeRegistrationTask) removeArtifacts() {
+	task.RemoveFile(task.Asset)
+}
+
+// NewCascadeRegistrationTask returns a new Task instance.
+func NewCascadeRegistrationTask(service *CascadeService) *CascadeRegistrationTask {
+	task := &CascadeRegistrationTask{
+		SuperNodeTask:  common.NewSuperNodeTask(logPrefix, service.historyDB),
+		CascadeService: service,
+		storage: common.NewStorageHandler(service.P2PClient, service.raptorQClient,
+			service.config.RaptorQServiceAddress, service.config.RqFilesDir, service.rqstore),
+	}
+
+	task.RegTaskHelper = common.NewRegTaskHelper(task.SuperNodeTask, service.lumeraClient)
+
+	return task
+}
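A task's lifetime is driven by Run, which delegates to the common RunHelper and removes the uploaded asset when the task finishes. A minimal usage sketch:

	task := service.NewCascadeRegistrationTask()
	go func() {
		if err := task.Run(ctx); err != nil {
			log.WithContext(ctx).WithError(err).Error("cascade task failed")
		}
	}()
	// incoming gRPC handlers locate the same task later via service.Task(task.ID())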
diff --git a/supernode/services/cascade/upload.go b/supernode/services/cascade/upload.go
new file mode 100644
index 00000000..00970198
--- /dev/null
+++ b/supernode/services/cascade/upload.go
@@ -0,0 +1,266 @@
+package cascade
+
+import (
+	"context"
+	"encoding/hex"
+	"encoding/json"
+
+	actiontypes "github.com/LumeraProtocol/lumera/x/action/types"
+	"github.com/LumeraProtocol/supernode/pkg/lumera/modules/supernode"
+	"github.com/cosmos/cosmos-sdk/client/grpc/cmtservice"
+
+	ct "github.com/LumeraProtocol/supernode/pkg/common/task"
+	"github.com/LumeraProtocol/supernode/pkg/errors"
+	"github.com/LumeraProtocol/supernode/pkg/log"
+	"github.com/LumeraProtocol/supernode/pkg/logtrace"
+	"github.com/LumeraProtocol/supernode/pkg/raptorq"
+
+	"google.golang.org/grpc/codes"
+	"google.golang.org/grpc/status"
+)
+
+type UploadInputDataRequest struct {
+	ActionID string
+	Filename string
+	DataHash string
+	RqIc     int32
+	RqMax    int32
+}
+
+type UploadInputDataResponse struct {
+	Success bool
+	Message string
+}
+
+func (task *CascadeRegistrationTask) UploadInputData(ctx context.Context, req *UploadInputDataRequest) (*UploadInputDataResponse, error) {
+	fields := logtrace.Fields{
+		logtrace.FieldMethod:  "UploadInputData",
+		logtrace.FieldRequest: req,
+	}
+
+	actionRes, err := task.lumeraClient.Action().GetAction(ctx, req.ActionID)
+	if err != nil {
+		fields[logtrace.FieldError] = err.Error()
+		logtrace.Error(ctx, "failed to get action", fields)
+		return nil, status.Errorf(codes.Internal, "failed to get action")
+	}
+	if actionRes.GetAction().ActionID == "" {
+		logtrace.Error(ctx, "action not found", fields)
+		return nil, status.Errorf(codes.Internal, "action not found")
+	}
+	actionDetails := actionRes.GetAction()
+	logtrace.Info(ctx, "action has been retrieved", fields)
+
+	latestBlock, err := task.lumeraClient.Node().GetLatestBlock(ctx)
+	if err != nil {
+		fields[logtrace.FieldError] = err.Error()
+		logtrace.Error(ctx, "failed to get latest block", fields)
+		return nil, status.Errorf(codes.Internal, "failed to get latest block")
+	}
+	latestBlockHeight := uint64(latestBlock.GetSdkBlock().GetHeader().Height)
+	latestBlockHash := latestBlock.GetBlockId().GetHash()
+	fields[logtrace.FieldBlockHeight] = latestBlockHeight
+	logtrace.Info(ctx, "latest block has been retrieved", fields)
+
+	topSNsRes, err := task.lumeraClient.SuperNode().GetTopSuperNodesForBlock(ctx, latestBlockHeight)
+	if err != nil {
+		fields[logtrace.FieldError] = err.Error()
+		logtrace.Error(ctx, "failed to get top SNs", fields)
+		return nil, status.Errorf(codes.Internal, "failed to get top SNs")
+	}
+	logtrace.Info(ctx, "top SNs have been fetched", fields)
+
+	if !supernode.Exists(topSNsRes.Supernodes, task.config.SupernodeAccountAddress) {
+		logtrace.Error(ctx, "current supernode does not exist in the top SNs list", fields)
+		return nil, status.Errorf(codes.Internal, "current supernode does not exist in the top SNs list")
+	}
+	logtrace.Info(ctx, "current supernode exists in the top SNs list", fields)
+
+	if req.DataHash != actionDetails.Metadata.GetCascadeMetadata().DataHash {
+		logtrace.Error(ctx, "data hash doesn't match", fields)
+		return nil, status.Errorf(codes.Internal, "data hash doesn't match")
+	}
+	logtrace.Info(ctx, "request data-hash has been matched with the action data-hash", fields)
+
+	// FIXME : use proper file
+	task.rqIDsIC, task.rqIDs,
+		task.rqIDsFile, task.rqIDEncodeParams, task.creatorSignature, err = task.raptorQ.GenRQIdentifiersFiles(ctx,
+		fields,
+		nil,
+		string(latestBlockHash), actionDetails.GetCreator(),
+		uint32(actionDetails.Metadata.GetCascadeMetadata().RqMax),
+	)
+	if err != nil {
+		fields[logtrace.FieldError] = err.Error()
+		logtrace.Error(ctx, "failed to generate RQID Files", fields)
+		return nil, status.Errorf(codes.Internal, "failed to generate RQID Files")
+	}
+	logtrace.Info(ctx, "rq symbols have been generated", fields)
+
+	ticket, err := task.createCascadeActionTicket(ctx, actionDetails, *latestBlock)
+	if err != nil {
+		fields[logtrace.FieldError] = err.Error()
+		logtrace.Error(ctx, "failed to create cascade ticket", fields)
+		return nil, status.Errorf(codes.Internal, "failed to create cascade ticket")
+	}
+	logtrace.Info(ctx, "cascade ticket created", fields)
+
+	switch task.NetworkHandler.IsPrimary() {
+	case true:
+		<-task.NewAction(func(ctx context.Context) error {
+			logtrace.Info(ctx, "primary node flow, waiting for signature from peers", fields)
+			for {
+				select {
+				case <-ctx.Done():
+					err = ctx.Err()
+					if err != nil {
+						logtrace.Info(ctx, "waiting for signature from peers cancelled or timeout", fields)
+					}
+
+					logtrace.Info(ctx, "ctx done return from Validate & Register", fields)
+					return nil
+				case <-task.AllSignaturesReceivedChn:
+					logtrace.Info(ctx, "all signatures received so start validation", fields)
+
+					// TODO : MsgFinalizeAction
+
+					return nil
+				}
+			}
+		})
+	case false:
+		<-task.NewAction(func(ctx context.Context) error {
+			logtrace.Info(ctx, "secondary node flow, sending data with signature to primary node for validation", fields)
+
+			if err = task.signAndSendCascadeTicket(ctx, task.NetworkHandler.ConnectedTo == nil, ticket, task.rqIDsFile, task.rqIDEncodeParams); err != nil { // FIXME : use the right data
+				fields[logtrace.FieldError] = err.Error()
+				logtrace.Error(ctx, "failed to sign & send cascade ticket to the primary node", fields)
+				return status.Errorf(codes.Internal, "failed to sign and send cascade ticket")
+			}
+
+			return nil
+		})
+	}
+
+	return &UploadInputDataResponse{
+		Success: true,
+		Message: "successfully uploaded input data",
+	}, nil
+}
+
+// signAndSendCascadeTicket signs the cascade ticket and, if this node is not the primary, sends it to the primary node
+func (task *CascadeRegistrationTask) signAndSendCascadeTicket(ctx context.Context, isPrimary bool, ticket []byte, data []byte, rqEncodeParams raptorq.EncoderParameters) (err error) {
+	secondaryNodeSignature, err := task.lumeraClient.Node().Sign(task.config.SupernodeAccountAddress, ticket)
+	if err != nil {
+		return errors.Errorf("sign ticket: %w", err)
+	}
+
+	if !isPrimary {
+		log.WithContext(ctx).Info("send signed cascade ticket to primary node")
+
+		cascadeNode, ok := task.NetworkHandler.ConnectedTo.SuperNodePeerAPIInterface.(*CascadeRegistrationNode)
+		if !ok {
+			return errors.Errorf("node is not CascadeRegistrationNode")
+		}
+
+		if err := cascadeNode.SendCascadeTicketSignature(ctx, task.config.SupernodeAccountAddress, secondaryNodeSignature, data, task.rqIDsFile, rqEncodeParams); err != nil { // FIXME : nodeID
+			return errors.Errorf("send signature to primary node %s at address %s: %w", task.NetworkHandler.ConnectedTo.ID, task.NetworkHandler.ConnectedTo.Address, err)
+		}
+	}
+
+	return nil
+}
+
+func (task *CascadeRegistrationTask) ValidateSignedTicketFromSecondaryNode(ctx context.Context,
+	ticket []byte, supernodeAccAddress string, supernodeSignature []byte, rqidFile []byte) error {
+	var err error
+
+	fields := logtrace.Fields{
+		logtrace.FieldMethod:                  "ValidateSignedTicketFromSecondaryNode",
+		logtrace.FieldSupernodeAccountAddress: supernodeAccAddress,
+	}
+	logtrace.Info(ctx, "request has been received to validate signature", fields)
+
+	err = task.lumeraClient.Node().Verify(supernodeAccAddress, ticket, supernodeSignature)
+	if err != nil {
+		log.WithContext(ctx).WithError(err).Errorf("error verifying the secondary-supernode signature")
+		return errors.Errorf("verify cascade ticket signature %w", err)
+	}
+	logtrace.Info(ctx, "secondary-supernode signature has been verified", fields)
+
+	var cascadeData ct.CascadeTicket
+	err = json.Unmarshal(ticket, &cascadeData)
+	if err != nil {
+		log.WithContext(ctx).WithError(err).Errorf("unmarshal cascade ticket signature")
+		return errors.Errorf("unmarshal cascade ticket signature %w", err)
+	}
+	logtrace.Info(ctx, "data has been unmarshalled", fields)
+
+	if err := task.validateRqIDs(ctx, rqidFile, &cascadeData); err != nil {
+		log.WithContext(ctx).WithError(err).Errorf("validate rqids files")
+
+		return errors.Errorf("validate rq & dd id files %w", err)
+	}
+
+	if err = task.validateRQSymbolID(ctx, &cascadeData); err != nil {
+		log.WithContext(ctx).WithError(err).Errorf("validate rq ids inside rqids file")
+		return errors.Errorf("validate rqids: %w", err)
+	}
+
+	task.dataHash = cascadeData.DataHash
+
+	return nil
+}
+
+// validateRqIDs validates the RQIDs file
+func (task *CascadeRegistrationTask) validateRqIDs(ctx context.Context, dd []byte, ticket *ct.CascadeTicket) error {
+	snAccAddresses := []string{ticket.Creator}
+
+	var err error
+	task.rawRqFile, task.rqIDFiles, err = task.ValidateIDFiles(ctx, dd,
+		ticket.RQIDsIC, uint32(ticket.RQIDsMax),
+		ticket.RQIDs, 1,
+		snAccAddresses,
+		task.lumeraClient,
+		ticket.CreatorSignature,
+	)
+	if err != nil {
+		return errors.Errorf("validate rq_ids file: %w", err)
+	}
+
+	return nil
+}
+
+// validateRQSymbolID validates the actual RQ Symbol IDs inside the RQIDs file
+func (task *CascadeRegistrationTask) validateRQSymbolID(ctx context.Context, ticket *ct.CascadeTicket) error {
+	content, err := task.Asset.Bytes()
+	if err != nil {
+		return errors.Errorf("read image contents: %w", err)
+	}
+
+	return task.storage.ValidateRaptorQSymbolIDs(ctx,
+		content /*uint32(len(task.Ticket.AppTicketData.RQIDs))*/, 1,
+		hex.EncodeToString([]byte(ticket.BlockHash)), ticket.Creator,
+		task.rawRqFile)
+}
+
+func (task *CascadeRegistrationTask) createCascadeActionTicket(ctx context.Context,
+	actionDetails *actiontypes.Action, latestBlock cmtservice.GetLatestBlockResponse) ([]byte, error) {
+	t := ct.CascadeTicket{
+		ActionID:         actionDetails.ActionID,
+		BlockHeight:      latestBlock.GetSdkBlock().GetHeader().Height,
+		BlockHash:        latestBlock.GetBlockId().GetHash(),
+		Creator:          actionDetails.GetCreator(),
+		CreatorSignature: task.creatorSignature,
+		DataHash:         actionDetails.Metadata.GetCascadeMetadata().DataHash,
+		RQIDsIC:          task.rqIDsIC,
+		RQIDs:            task.rqIDs,
+		RQIDsMax:         actionDetails.GetMetadata().GetCascadeMetadata().RqMax,
+	}
+	ticket, err := json.Marshal(t)
+	if err != nil {
+		return nil, status.Errorf(codes.Internal, "failed to marshal the cascade ticket")
+	}
+
+	return ticket, nil
+}
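From the caller's perspective the pipeline above is opaque: it submits the action metadata and waits for the primary/secondary flow to complete. A hedged sketch (all field values are placeholders):

	resp, err := task.UploadInputData(ctx, &cascade.UploadInputDataRequest{
		ActionID: "action-123", // must exist on chain
		Filename: "asset.bin",
		DataHash: "9f86d081...", // must equal the hash in the action's cascade metadata
		RqIc:     1,
		RqMax:    50,
	})
	if err == nil && resp.Success {
		// primary: now waiting on peer signatures; secondary: ticket already forwarded
	}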
diff --git a/supernode/services/common/config.go b/supernode/services/common/config.go
new file mode 100644
index 00000000..684d1fd1
--- /dev/null
+++ b/supernode/services/common/config.go
@@ -0,0 +1,19 @@
+package common
+
+const (
+	defaultNumberSuperNodes = 10
+)
+
+// Config contains common configuration of the services.
+type Config struct {
+	SupernodeAccountAddress string
+	SupernodeIPAddress      string
+	NumberSuperNodes        int
+}
+
+// NewConfig returns a new Config instance
+func NewConfig() *Config {
+	return &Config{
+		NumberSuperNodes: defaultNumberSuperNodes,
+	}
+}
diff --git a/supernode/services/common/network_handler.go b/supernode/services/common/network_handler.go
new file mode 100644
index 00000000..d9897fbf
--- /dev/null
+++ b/supernode/services/common/network_handler.go
@@ -0,0 +1,257 @@
+package common
+
+import (
+	"context"
+	"fmt"
+	"sync"
+
+	"github.com/LumeraProtocol/supernode/pkg/errors"
+	"github.com/LumeraProtocol/supernode/pkg/log"
+	"github.com/LumeraProtocol/supernode/pkg/lumera"
+	supernode "github.com/LumeraProtocol/supernode/pkg/lumera/modules/supernode"
+	"github.com/LumeraProtocol/supernode/pkg/types"
+	node "github.com/LumeraProtocol/supernode/supernode/node/supernode"
+)
+
+// NetworkHandler provides common functionality for the SN mesh and other SN interconnections
+type NetworkHandler struct {
+	task          *SuperNodeTask
+	lumeraHandler lumera.Client
+
+	nodeMaker  node.NodeMaker
+	NodeClient node.ClientInterface
+
+	acceptedMu sync.Mutex
+	Accepted   SuperNodePeerList
+
+	meshedNodes []types.MeshedSuperNode
+	// valid only for secondary node
+	ConnectedTo *SuperNodePeer
+
+	superNodeAccAddress     string
+	minNumberConnectedNodes int
+}
+
+// NewNetworkHandler creates instance of NetworkHandler
+func NewNetworkHandler(task *SuperNodeTask,
+	nodeClient node.ClientInterface,
+	nodeMaker node.NodeMaker,
+	lc lumera.Client,
+	minNumberConnectedNodes int,
+) *NetworkHandler {
+	return &NetworkHandler{
+		task:                    task,
+		nodeMaker:               nodeMaker,
+		lumeraHandler:           lc,
+		NodeClient:              nodeClient,
+		minNumberConnectedNodes: minNumberConnectedNodes,
+	}
+}
+
+// MeshedNodesPastelID returns the IDs of the meshed nodes
+func (h *NetworkHandler) MeshedNodesPastelID() []string {
+	var ids []string
+	for _, peer := range h.meshedNodes {
+		ids = append(ids, peer.NodeID)
+	}
+	return ids
+}
+
+// Session sets the node mode (primary or secondary) at the start of the handshake
+func (h *NetworkHandler) Session(_ context.Context, isPrimary bool) error {
+	if err := h.task.RequiredStatus(StatusTaskStarted); err != nil {
+		return err
+	}
+
+	<-h.task.NewAction(func(ctx context.Context) error {
+		if isPrimary {
+			log.WithContext(ctx).Debug("Acts as primary node")
+			h.task.UpdateStatus(StatusPrimaryMode)
+			return nil
+		}
+
+		log.WithContext(ctx).Debug("Acts as secondary node")
+		h.task.UpdateStatus(StatusSecondaryMode)
+
+		return nil
+	})
+	return nil
+}
+
+// AcceptedNodes waits for supernodes to connect and returns them as soon as the required number has joined.
+func (h *NetworkHandler) AcceptedNodes(serverCtx context.Context) (SuperNodePeerList, error) {
+	if err := h.task.RequiredStatus(StatusPrimaryMode); err != nil {
+		return nil, fmt.Errorf("AcceptedNodes: %w", err)
+	}
+
+	<-h.task.NewAction(func(ctx context.Context) error {
+		log.WithContext(ctx).Debug("Waiting for supernodes to connect")
+
+		sub := h.task.SubscribeStatus()
+		for {
+			select {
+			case <-serverCtx.Done():
+				return nil
+			case <-ctx.Done():
+				return nil
+			case status := <-sub():
+				if status.Is(StatusConnected) {
+					return nil
+				}
+			}
+		}
+	})
+	return h.Accepted, nil
+}
+
+// SessionNode accepts a secondary node
+func (h *NetworkHandler) SessionNode(_ context.Context, nodeID string) error {
+	h.acceptedMu.Lock()
+	defer h.acceptedMu.Unlock()
+
+	if err := h.task.RequiredStatus(StatusPrimaryMode); err != nil {
+		return fmt.Errorf("SessionNode: %w", err)
+	}
+
+	var err error
+
+	<-h.task.NewAction(func(ctx context.Context) error {
+		if node := h.Accepted.ByID(nodeID); node != nil {
+			log.WithContext(ctx).WithField("nodeID", nodeID).Errorf("node is already registered")
+			err = errors.Errorf("node %q is already registered", nodeID)
+			return nil
+		}
+
+		var someNode *SuperNodePeer
+		someNode, err = h.toSupernodePeer(ctx, nodeID)
+		if err != nil {
+			log.WithContext(ctx).WithField("nodeID", nodeID).WithError(err).Errorf("get node by extID")
+			err = errors.Errorf("get node by extID %s: %w", nodeID, err)
+			return nil
+		}
+		h.Accepted.Add(someNode)
+
+		log.WithContext(ctx).WithField("nodeID", nodeID).Debug("Accept secondary node")
+
+		if len(h.Accepted) >= h.minNumberConnectedNodes {
+			h.task.UpdateStatus(StatusConnected)
+		}
+		return nil
+	})
+	return err
+}
+
+// ConnectTo connects to the primary node
+func (h *NetworkHandler) ConnectTo(_ context.Context, nodeID, sessID string) error {
+	if err := h.task.RequiredStatus(StatusSecondaryMode); err != nil {
+		return err
+	}
+
+	var err error
+
+	<-h.task.NewAction(func(ctx context.Context) error {
+		var someNode *SuperNodePeer
+		someNode, err = h.toSupernodePeer(ctx, nodeID)
+		if err != nil {
+			log.WithContext(ctx).WithField("nodeID", nodeID).WithError(err).Errorf("get node by extID")
+			return nil
+		}
+
+		if err := someNode.Connect(ctx); err != nil {
+			log.WithContext(ctx).WithField("nodeID", nodeID).WithError(err).Errorf("connect to node")
+			return nil
+		}
+
+		if err = someNode.Session(ctx, h.superNodeAccAddress, sessID); err != nil {
+			log.WithContext(ctx).WithField("sessID", sessID).WithField("supernodeAddress", h.superNodeAccAddress).WithError(err).Errorf("handshake with peer")
+			return nil
+		}
+
+		h.ConnectedTo = someNode
+		h.task.UpdateStatus(StatusConnected)
+		return nil
+	})
+	return err
+}
+
+// MeshNodes stores the info of all meshed supernodes
+func (h *NetworkHandler) MeshNodes(_ context.Context, meshedNodes []types.MeshedSuperNode) error {
+	if err := h.task.RequiredStatus(StatusConnected); err != nil {
+		return err
+	}
+	h.meshedNodes = meshedNodes
+
+	return nil
+}
+
+// CheckNodeInMeshedNodes checks if the node is in the active mesh (by nodeID)
+func (h *NetworkHandler) CheckNodeInMeshedNodes(nodeID string) error {
+	if h.meshedNodes == nil {
+		return errors.New("nil meshedNodes")
+	}
+
+	for _, node := range h.meshedNodes {
+		if node.NodeID == nodeID {
+			return nil
+		}
+	}
+
+	return errors.New("nodeID not found")
+}
+
+// toSupernodePeer returns a SuperNodePeer for the SN with the given supernode account address
+func (h *NetworkHandler) toSupernodePeer(ctx context.Context, supernodeAccountAddress string) (*SuperNodePeer, error) {
+	sn, err := h.lumeraHandler.SuperNode().GetSupernodeBySupernodeAddress(ctx, supernodeAccountAddress)
+	if err != nil {
+		return nil, err
+	}
+
+	supernodeIP, err := supernode.GetLatestIP(sn)
+	if err != nil {
+		return nil, err
+	}
+
+	someNode := NewSuperNode(h.NodeClient, supernodeIP, supernodeAccountAddress, h.nodeMaker)
+	return someNode, nil
+}
+
+// Connect connects to the grpc server and sets up a pointer to the concrete client wrapper
+func (node *SuperNodePeer) Connect(ctx context.Context) error {
+	connCtx, connCancel := context.WithTimeout(ctx, defaultConnectToNodeTimeout)
+	defer connCancel()
+
+	conn, err := node.ClientInterface.Connect(connCtx, node.Address)
+	if err != nil {
+		return err
+	}
+
+	node.ConnectionInterface = conn
+	node.SuperNodePeerAPIInterface = node.MakeNode(conn)
+	return nil
+}
+
+// CloseSNsConnections closes all open connections to peer supernodes
+func (h *NetworkHandler) CloseSNsConnections(ctx context.Context) error {
+	for _, node := range h.Accepted {
+		if node.ConnectionInterface != nil {
+			if err := node.Close(); err != nil {
+				log.WithContext(ctx).WithError(err).Errorf("close connection to node %s", node.ID)
+			}
+		} else {
+			log.WithContext(ctx).Errorf("node %s has no connection", node.ID)
+		}
+	}
+
+	if h.ConnectedTo != nil {
+		if err := h.ConnectedTo.Close(); err != nil {
+			log.WithContext(ctx).WithError(err).Errorf("close connection to node %s", h.ConnectedTo.ID)
+		}
+	}
+
+	return nil
+}
+
+// IsPrimary reports whether this node acts as the primary (it has no upstream connection)
+func (h *NetworkHandler) IsPrimary() bool {
+	return h.ConnectedTo == nil
+}
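The mesh forms in two mirrored flows built from the methods above. A condensed sketch, where `h` is the NetworkHandler of the respective node:

	// primary
	_ = h.Session(ctx, true)                 // enter primary mode
	peers, err := h.AcceptedNodes(serverCtx) // blocks until minNumberConnectedNodes have joined

	// secondary
	_ = h.Session(ctx, false)                     // enter secondary mode
	err = h.ConnectTo(ctx, primaryNodeID, sessID) // dial the primary and run the handshake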
diff --git a/supernode/services/common/node_peer.go b/supernode/services/common/node_peer.go
new file mode 100644
index 00000000..6dc18424
--- /dev/null
+++ b/supernode/services/common/node_peer.go
@@ -0,0 +1,79 @@
+package common
+
+import (
+	"time"
+
+	node "github.com/LumeraProtocol/supernode/supernode/node/supernode"
+)
+
+const (
+	defaultConnectToNodeTimeout = time.Second * 35
+)
+
+// SuperNodePeer represents a single supernode
+type SuperNodePeer struct {
+	node.ClientInterface
+	node.NodeMaker
+	node.ConnectionInterface
+	node.SuperNodePeerAPIInterface
+
+	ID      string
+	Address string
+}
+
+//// Connect connects to grpc Server and setup pointer to concrete client wrapper
+//func (node *SuperNodePeer) Connect(ctx context.Context) error {
+//	connCtx, connCancel := context.WithTimeout(ctx, defaultConnectToNodeTimeout)
+//	defer connCancel()
+//
+//	conn, err := node.ClientInterface.Connect(connCtx, node.Address)
+//	if err != nil {
+//		return err
+//	}
+//
+//	node.ConnectionInterface = conn
+//	node.SuperNodePeerAPIInterface = node.MakeNode(conn)
+//	return nil
+//}
+
+// NewSuperNode returns a new Node instance.
+func NewSuperNode(
+	client node.ClientInterface,
+	address string, pastelID string,
+	nodeMaker node.NodeMaker) *SuperNodePeer {
+	return &SuperNodePeer{
+		ClientInterface: client,
+		NodeMaker:       nodeMaker,
+		Address:         address,
+		ID:              pastelID,
+	}
+}
+
+// SuperNodePeerList represents multiple SuperNodePeers
+type SuperNodePeerList []*SuperNodePeer
+
+// Add adds a new node to the list
+func (list *SuperNodePeerList) Add(node *SuperNodePeer) {
+	*list = append(*list, node)
+}
+
+// ByID returns a node from the list by the given id.
+func (list *SuperNodePeerList) ByID(id string) *SuperNodePeer {
+	for _, someNode := range *list {
+		if someNode.ID == id {
+			return someNode
+		}
+	}
+	return nil
+}
+
+// Remove removes a node from the list by the given id.
+func (list *SuperNodePeerList) Remove(id string) {
+	for i, someNode := range *list {
+		if someNode.ID == id {
+			// append handles removal of the last element as well
+			*list = append((*list)[:i], (*list)[i+1:]...)
+			break
+		}
+	}
+}
diff --git a/supernode/services/common/p2p.go b/supernode/services/common/p2p.go
new file mode 100644
index 00000000..a477a591
--- /dev/null
+++ b/supernode/services/common/p2p.go
@@ -0,0 +1,21 @@
+package common
+
+const (
+	// UnknownDataType ...
+	UnknownDataType = iota // 0
+
+	// P2PDataRaptorQSymbol rq symbol
+	P2PDataRaptorQSymbol // 1
+	// P2PDataCascadeMetadata cascade ID file
+	P2PDataCascadeMetadata // 2
+	// P2PDataDDMetadata dd fp metadata file
+	P2PDataDDMetadata // 3
+	// P2PPreviewThumbnail preview NFT thumbnail
+	P2PPreviewThumbnail // 4
+	// P2PMediumThumbnail NFT medium thumbnail
+	P2PMediumThumbnail // 5
+	// P2PSmallThumbnail small NFT thumbnail
+	P2PSmallThumbnail // 6
+	// P2PDebug debug
+	P2PDebug // 7
+)
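These tags travel with every blob stored in P2P so peers can tell RQ symbols from metadata when reading them back. A short sketch of picking the tag at store time (StoreBytesIntoP2P is defined in storage_handler.go further down):

	idsKey, err := h.StoreBytesIntoP2P(ctx, rqIDsFile, P2PDataCascadeMetadata)
	if err != nil {
		return err
	}
	symKey, err := h.StoreBytesIntoP2P(ctx, symbol, P2PDataRaptorQSymbol)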
diff --git a/supernode/services/common/reg_task_helper.go b/supernode/services/common/reg_task_helper.go
new file mode 100644
index 00000000..077a1b2d
--- /dev/null
+++ b/supernode/services/common/reg_task_helper.go
@@ -0,0 +1,138 @@
+package common
+
+import (
+	"bytes"
+	"context"
+	"sync"
+
+	"github.com/LumeraProtocol/supernode/pkg/errors"
+	"github.com/LumeraProtocol/supernode/pkg/log"
+	"github.com/LumeraProtocol/supernode/pkg/lumera"
+	"github.com/LumeraProtocol/supernode/pkg/raptorq"
+	"github.com/LumeraProtocol/supernode/pkg/utils"
+)
+
+const (
+	SeparatorByte = 46
+)
+
+// RegTaskHelper common operations related to (any) Ticket registration
+type RegTaskHelper struct {
+	*SuperNodeTask
+
+	NetworkHandler *NetworkHandler
+	LumeraHandler  *lumera.Client
+
+	peersTicketSignatureMtx  *sync.Mutex
+	PeersTicketSignature     map[string][]byte
+	AllSignaturesReceivedChn chan struct{}
+}
+
+// NewRegTaskHelper creates instance of RegTaskHelper
+func NewRegTaskHelper(task *SuperNodeTask,
+	lumeraClient lumera.Client,
+) *RegTaskHelper {
+	return &RegTaskHelper{
+		SuperNodeTask:            task,
+		LumeraHandler:            &lumeraClient,
+		peersTicketSignatureMtx:  &sync.Mutex{},
+		PeersTicketSignature:     make(map[string][]byte),
+		AllSignaturesReceivedChn: make(chan struct{}),
+	}
+}
+
+// AddPeerTicketSignature collects ticket signatures from other SNs and stores them in an internal map
+func (h *RegTaskHelper) AddPeerTicketSignature(nodeID string, signature []byte, reqStatus Status) error {
+	h.peersTicketSignatureMtx.Lock()
+	defer h.peersTicketSignatureMtx.Unlock()
+
+	if err := h.RequiredStatus(reqStatus); err != nil {
+		return err
+	}
+
+	var err error
+
+	<-h.NewAction(func(ctx context.Context) error {
+		log.WithContext(ctx).Debugf("receive ticket signature from node %s", nodeID)
+		if node := h.NetworkHandler.Accepted.ByID(nodeID); node == nil {
+			log.WithContext(ctx).WithField("node", nodeID).Errorf("node is not in Accepted list")
+			err = errors.Errorf("node %s not in Accepted list", nodeID)
+			return nil
+		}
+
+		h.PeersTicketSignature[nodeID] = signature
+		if len(h.PeersTicketSignature) == len(h.NetworkHandler.Accepted) {
+			log.WithContext(ctx).Debug("all signatures received")
+			go func() {
+				close(h.AllSignaturesReceivedChn)
+			}()
+		}
+		return nil
+	})
+	return err
+}
+
+// ValidateIDFiles validates the received (IDs) file and its (50) IDs:
+//  1. checks signatures
+//  2. generates the list of 50 IDs and compares them to the received ones
+func (h *RegTaskHelper) ValidateIDFiles(ctx context.Context,
+	data []byte, ic uint32, max uint32, ids []string, numSignRequired int,
+	snAccAddresses []string,
+	lumeraClient lumera.Client,
+	creatorSignature []byte,
+) ([]byte, [][]byte, error) {
+
+	dec, err := utils.B64Decode(data)
+	if err != nil {
+		return nil, nil, errors.Errorf("decode data: %w", err)
+	}
+
+	decData, err := utils.Decompress(dec)
+	if err != nil {
+		return nil, nil, errors.Errorf("decompress: %w", err)
+	}
+
+	splits := bytes.Split(decData, []byte{SeparatorByte})
+	if len(splits) != numSignRequired+1 {
+		return nil, nil, errors.New("invalid data")
+	}
+
+	file, err := utils.B64Decode(splits[0])
+	if err != nil {
+		return nil, nil, errors.Errorf("decode file: %w", err)
+	}
+
+	verifications := 0
+	verifiedNodes := make(map[int]bool)
+	for i := 1; i < numSignRequired+1; i++ {
+		for j := 0; j < len(snAccAddresses); j++ {
+			if _, ok := verifiedNodes[j]; ok {
+				continue
+			}
+
+			err := lumeraClient.Node().Verify(snAccAddresses[j], file, creatorSignature) // TODO : verify the signature
+			if err != nil {
+				return nil, nil, errors.Errorf("verify file signature %w", err)
+			}
+
+			verifiedNodes[j] = true
+			verifications++
+			break
+		}
+	}
+
+	if verifications != numSignRequired {
+		return nil, nil, errors.Errorf("file verification failed: need %d verifications, got %d", numSignRequired, verifications)
+	}
+
+	gotIDs, idFiles, err := raptorq.GetIDFiles(ctx, decData, ic, max)
+	if err != nil {
+		return nil, nil, errors.Errorf("get ids: %w", err)
+	}
+
+	if err := utils.EqualStrList(gotIDs, ids); err != nil {
+		return nil, nil, errors.Errorf("IDs don't match: %w", err)
+	}
+
+	return file, idFiles, nil
+}
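On the primary, AddPeerTicketSignature and AllSignaturesReceivedChn cooperate as a simple barrier: each gRPC handler deposits one signature, and whoever waits on the channel proceeds once every accepted peer has signed. A sketch:

	// in the gRPC handler, per incoming signature:
	if err := helper.AddPeerTicketSignature(nodeID, sig, StatusAssetUploaded); err != nil {
		return err
	}

	// in the registration flow:
	select {
	case <-helper.AllSignaturesReceivedChn:
		// every accepted peer has signed; safe to finalize the action
	case <-ctx.Done():
		return ctx.Err()
	}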
diff --git a/supernode/services/common/service.go b/supernode/services/common/service.go
new file mode 100644
index 00000000..f486bd41
--- /dev/null
+++ b/supernode/services/common/service.go
@@ -0,0 +1,80 @@
+package common
+
+import (
+	"context"
+	"time"
+
+	"github.com/LumeraProtocol/supernode/p2p"
+	"github.com/LumeraProtocol/supernode/pkg/common/task"
+	"github.com/LumeraProtocol/supernode/pkg/errgroup"
+	"github.com/LumeraProtocol/supernode/pkg/errors"
+	"github.com/LumeraProtocol/supernode/pkg/log"
+	"github.com/LumeraProtocol/supernode/pkg/storage"
+	"github.com/LumeraProtocol/supernode/pkg/storage/files"
+)
+
+// SuperNodeServiceInterface common interface for Services
+type SuperNodeServiceInterface interface {
+	RunHelper(ctx context.Context) error
+	NewTask() task.Task
+	Task(id string) task.Task
+}
+
+// SuperNodeService common "class" for Services
+type SuperNodeService struct {
+	*task.Worker
+	*files.Storage
+
+	P2PClient p2p.Client
+}
+
+// run starts the worker and the storage
+func (service *SuperNodeService) run(ctx context.Context, pastelID string, prefix string) error {
+	ctx = log.ContextWithPrefix(ctx, prefix)
+
+	if pastelID == "" {
+		return errors.New("PastelID is not specified in the config file")
+	}
+
+	group, ctx := errgroup.WithContext(ctx)
+	group.Go(func() error {
+		return service.Worker.Run(ctx)
+	})
+	if service.Storage != nil {
+		group.Go(func() error {
+			return service.Storage.Run(ctx)
+		})
+	}
+	return group.Wait()
+}
+
+// RunHelper common code for Service runner
+func (service *SuperNodeService) RunHelper(ctx context.Context, pastelID string, prefix string) error {
+	for {
+		select {
+		case <-ctx.Done():
+			log.WithContext(ctx).Error("context done - closing sn services")
+			return nil
+		case <-time.After(5 * time.Second):
+			if err := service.run(ctx, pastelID, prefix); err != nil {
+				service.Worker = task.NewWorker()
+				log.WithContext(ctx).WithError(err).Error("Service run failed, retrying")
+			} else {
+				log.WithContext(ctx).Info("Service run completed successfully - closing sn services")
+				return nil
+			}
+		}
+	}
+}
+
+// NewSuperNodeService creates SuperNodeService
+func NewSuperNodeService(
+	fileStorage storage.FileStorageInterface,
+	p2pClient p2p.Client,
+) *SuperNodeService {
+	return &SuperNodeService{
+		Worker:    task.NewWorker(),
+		Storage:   files.NewStorage(fileStorage),
+		P2PClient: p2pClient,
+	}
+}
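RunHelper is the supervision loop concrete services build on: it retries run with a fresh worker after a failure and returns once a run completes cleanly. Typical usage from a concrete service (a sketch; the supernode account address stands in for the legacy pastelID parameter):

	svc := common.NewSuperNodeService(fileStorage, p2pClient)
	if err := svc.RunHelper(ctx, supernodeAccAddress, "cascade"); err != nil {
		return err
	}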
diff --git a/supernode/services/common/status.go b/supernode/services/common/status.go
new file mode 100644
index 00000000..53af3527
--- /dev/null
+++ b/supernode/services/common/status.go
@@ -0,0 +1,124 @@
+package common
+
+// List of task statuses.
+const (
+	StatusTaskStarted Status = iota
+
+	// Mode
+	StatusPrimaryMode
+	StatusSecondaryMode
+
+	// Process
+	StatusConnected
+
+	StatusImageProbed
+	StatusAssetUploaded
+	StatusImageAndThumbnailCoordinateUploaded
+	StatusRegistrationFeeCalculated
+	StatusFileDecoded
+
+	// Error
+	StatusErrorInvalidBurnTxID
+	StatusRequestTooLate
+	StatusNftRegGettingFailed
+	StatusNftRegDecodingFailed
+	StatusNftRegTicketInvalid
+	StatusListTradeTicketsFailed
+	StatusTradeTicketsNotFound
+	StatusTradeTicketMismatched
+	StatusTimestampVerificationFailed
+	StatusTimestampInvalid
+	StatusRQServiceConnectionFailed
+	StatusSymbolFileNotFound
+	StatusSymbolFileInvalid
+	StatusSymbolNotFound
+	StatusSymbolMismatched
+	StatusSymbolsNotEnough
+	StatusFileDecodingFailed
+	StatusFileReadingFailed
+	StatusFileMismatched
+	StatusFileEmpty
+	StatusKeyNotFound
+	StatusFileRestoreFailed
+	StatusFileExists
+
+	// Final
+	StatusTaskCanceled
+	StatusTaskCompleted
+)
+
+var statusNames = map[Status]string{
+	StatusTaskStarted:   "Task started",
+	StatusPrimaryMode:   "Primary Mode",
+	StatusSecondaryMode: "Secondary Mode",
+	StatusConnected:     "Connected",
+	StatusImageProbed:   "Image Probed",
+	StatusAssetUploaded: "Asset Uploaded",
+	StatusImageAndThumbnailCoordinateUploaded: "Image And Thumbnail Coordinate Uploaded",
+	StatusRegistrationFeeCalculated:           "Registration Fee Calculated",
+	StatusFileDecoded:                         "File Decoded",
+	StatusErrorInvalidBurnTxID:                "Error Invalid Burn TxID",
+	StatusRequestTooLate:                      "Request too late",
+	StatusNftRegGettingFailed:                 "NFT registered getting failed",
+	StatusNftRegDecodingFailed:                "NFT registered decoding failed",
+	StatusNftRegTicketInvalid:                 "NFT registered ticket invalid",
+	StatusListTradeTicketsFailed:              "Could not get available trade tickets",
+	StatusTradeTicketsNotFound:                "Trade tickets not found",
+	StatusTradeTicketMismatched:               "Trade ticket mismatched",
+	StatusTimestampVerificationFailed:         "Could not verify timestamp",
+	StatusTimestampInvalid:                    "Timestamp invalid",
+	StatusRQServiceConnectionFailed:           "RQ Service connection failed",
+	StatusSymbolFileNotFound:                  "Symbol file not found",
+	StatusSymbolFileInvalid:                   "Symbol file invalid",
+	StatusSymbolNotFound:                      "Symbol not found",
+	StatusSymbolMismatched:                    "Symbol mismatched",
+	StatusSymbolsNotEnough:                    "Symbols not enough",
+	StatusFileDecodingFailed:                  "File decoding failed",
+	StatusFileReadingFailed:                   "File reading failed",
+	StatusFileEmpty:                           "File empty",
+	StatusFileMismatched:                      "File mismatched",
+	StatusKeyNotFound:                         "Key not found",
+	StatusFileExists:                          "File hash exists",
+	StatusFileRestoreFailed:                   "File restore failed",
+	StatusTaskCanceled:                        "Task Canceled",
+	StatusTaskCompleted:                       "Task Completed",
+}
+
+// Status represents status of the task
+type Status byte
+
+func (status Status) String() string {
+	if name, ok := statusNames[status]; ok {
+		return name
+	}
+	return ""
+}
+
+// IsFinal returns true if the status is the final.
+func (status Status) IsFinal() bool {
+	return status == StatusTaskCanceled || status == StatusTaskCompleted
+}
+
+// IsFailure returns true if the task failed due to an error
+func (status Status) IsFailure() bool {
+	return status == StatusTaskCanceled || status == StatusRequestTooLate ||
+		status == StatusNftRegGettingFailed || status == StatusNftRegDecodingFailed ||
+		status == StatusNftRegTicketInvalid || status == StatusListTradeTicketsFailed ||
+		status == StatusTradeTicketsNotFound || status == StatusTradeTicketMismatched ||
+		status == StatusTimestampVerificationFailed || status == StatusTimestampInvalid ||
+		status == StatusRQServiceConnectionFailed || status == StatusSymbolFileNotFound ||
+		status == StatusSymbolFileInvalid || status == StatusSymbolNotFound ||
+		status == StatusSymbolMismatched || status == StatusSymbolsNotEnough ||
+		status == StatusFileDecodingFailed || status == StatusFileReadingFailed ||
+		status == StatusFileEmpty || status == StatusFileMismatched ||
+		status == StatusKeyNotFound || status == StatusFileRestoreFailed || status == StatusFileExists
+}
+
+// StatusNames returns a list of status names ordered by status value.
+func StatusNames() []string {
+	list := make([]string, len(statusNames))
+	for i, name := range statusNames {
+		list[i] = name
+	}
+	return list
+}
diff --git a/supernode/services/common/status_test.go b/supernode/services/common/status_test.go
new file mode 100644
index 00000000..3f6de1be
--- /dev/null
+++ b/supernode/services/common/status_test.go
@@ -0,0 +1,350 @@
+package common
+
+import (
+	"fmt"
+	"testing"
+
+	"github.com/stretchr/testify/assert"
+)
+
+func TestStatusNames(t *testing.T) {
+	t.Parallel()
+
+	testCases := []struct {
+		expectedStatuses []Status
+	}{
+		{
+			expectedStatuses: []Status{
+				StatusTaskStarted,
+				StatusPrimaryMode,
+				StatusSecondaryMode,
+
+				// Process
+				StatusConnected,
+				StatusImageProbed,
+				StatusAssetUploaded,
+				StatusImageAndThumbnailCoordinateUploaded,
+				StatusRegistrationFeeCalculated,
+				StatusFileDecoded,
+
+				// Error
+				StatusErrorInvalidBurnTxID,
+				StatusRequestTooLate,
+				StatusNftRegGettingFailed,
+				StatusNftRegDecodingFailed,
+				StatusNftRegTicketInvalid,
+				StatusListTradeTicketsFailed,
+				StatusTradeTicketsNotFound,
+				StatusTradeTicketMismatched,
+				StatusTimestampVerificationFailed,
+				StatusTimestampInvalid,
+				StatusRQServiceConnectionFailed,
+				StatusSymbolFileNotFound,
+				StatusSymbolFileInvalid,
+				StatusSymbolNotFound,
+				StatusSymbolMismatched,
+				StatusSymbolsNotEnough,
+				StatusFileDecodingFailed,
+				StatusFileReadingFailed,
+				StatusFileMismatched,
+				StatusFileEmpty,
+				StatusKeyNotFound,
+				StatusFileRestoreFailed,
+				StatusFileExists,
+
+				// Final
+				StatusTaskCanceled,
+				StatusTaskCompleted,
+			},
+		},
+	}
+
+	for i, testCase := range testCases {
+		testCase := testCase
+
+		t.Run(fmt.Sprintf("testCase:%d", i), func(t *testing.T) {
+			t.Parallel()
+
+			var expectedNames []string
+			for _, status := range testCase.expectedStatuses {
+				expectedNames = append(expectedNames, StatusNames()[status])
+			}
+
+			assert.Equal(t, expectedNames, StatusNames())
+		})
+	}
+}
+
+func TestStatusString(t *testing.T) {
+	t.Parallel()
+
+	testCases := []struct {
+		status        Status
+		expectedValue string
+	}{
+		{
+			status:        StatusTaskStarted,
+			expectedValue: StatusNames()[StatusTaskStarted],
+		}, {
+			status:        StatusFileDecoded,
+			expectedValue: StatusNames()[StatusFileDecoded],
+		}, {
+			status:        StatusRequestTooLate,
+			expectedValue: StatusNames()[StatusRequestTooLate],
+		}, {
+			status:        StatusNftRegGettingFailed,
+			expectedValue: StatusNames()[StatusNftRegGettingFailed],
+		}, {
+			status:        StatusNftRegDecodingFailed,
+			expectedValue: StatusNames()[StatusNftRegDecodingFailed],
+		}, {
+			status:        StatusNftRegTicketInvalid,
+			expectedValue: StatusNames()[StatusNftRegTicketInvalid],
+		}, {
+			status:        StatusListTradeTicketsFailed,
+			expectedValue: StatusNames()[StatusListTradeTicketsFailed],
+		}, {
+			status:        StatusTradeTicketsNotFound,
+			expectedValue: StatusNames()[StatusTradeTicketsNotFound],
+		}, {
+			status:        StatusTradeTicketMismatched,
+			expectedValue: StatusNames()[StatusTradeTicketMismatched],
+		}, {
+			status:        StatusTimestampVerificationFailed,
+			expectedValue: StatusNames()[StatusTimestampVerificationFailed],
+		}, {
+			status:        StatusTimestampInvalid,
+			expectedValue: StatusNames()[StatusTimestampInvalid],
+		}, {
+			status:        StatusRQServiceConnectionFailed,
+			expectedValue: StatusNames()[StatusRQServiceConnectionFailed],
+		}, {
+			status:        StatusSymbolFileNotFound,
+			expectedValue: StatusNames()[StatusSymbolFileNotFound],
+		}, {
+			status:        StatusSymbolFileInvalid,
+			expectedValue: StatusNames()[StatusSymbolFileInvalid],
+		}, {
+			status:        StatusSymbolNotFound,
+			expectedValue: StatusNames()[StatusSymbolNotFound],
+		}, {
+			status:        StatusSymbolMismatched,
+			expectedValue: StatusNames()[StatusSymbolMismatched],
+		}, {
+			status:        StatusSymbolsNotEnough,
+			expectedValue: StatusNames()[StatusSymbolsNotEnough],
+		}, {
+			status:        StatusFileDecodingFailed,
+			expectedValue: StatusNames()[StatusFileDecodingFailed],
+		}, {
+			status:        StatusFileReadingFailed,
+			expectedValue: StatusNames()[StatusFileReadingFailed],
+		}, {
+			status:        StatusFileMismatched,
+			expectedValue: StatusNames()[StatusFileMismatched],
+		}, {
+			status:        StatusFileEmpty,
+			expectedValue: StatusNames()[StatusFileEmpty],
+		}, {
+			status:        StatusTaskCanceled,
+			expectedValue: StatusNames()[StatusTaskCanceled],
+		}, {
+			status:        StatusTaskCompleted,
+			expectedValue: StatusNames()[StatusTaskCompleted],
+		},
+	}
+
+	for _, testCase := range testCases {
+		testCase := testCase
+
+		t.Run(fmt.Sprintf("status:%v/value:%s", testCase.status, testCase.expectedValue), func(t *testing.T) {
+			t.Parallel()
+
+			value := testCase.status.String()
+			assert.Equal(t, testCase.expectedValue, value)
+		})
+	}
+}
+
+func TestStatusIsFinal(t *testing.T) {
+	t.Parallel()
+
+	testCases := []struct {
+		status        Status
+		expectedValue bool
+	}{
+		{
+			status:        StatusTaskStarted,
+			expectedValue: false,
+		}, {
+			status:        StatusFileDecoded,
+			expectedValue: false,
+		}, {
+			status:        StatusRequestTooLate,
+			expectedValue: false,
+		}, {
+			status:        StatusNftRegGettingFailed,
+			expectedValue: false,
+		}, {
+			status:        StatusNftRegDecodingFailed,
+			expectedValue: false,
+		}, {
+			status:        StatusNftRegTicketInvalid,
+			expectedValue: false,
+		}, {
+			status:        StatusListTradeTicketsFailed,
+			expectedValue: false,
+		}, {
+			status:        StatusTradeTicketsNotFound,
+			expectedValue: false,
+		}, {
+			status:        StatusTradeTicketMismatched,
+			expectedValue: false,
+		}, {
+			status:        StatusTimestampVerificationFailed,
+			expectedValue: false,
+		}, {
+			status:        StatusTimestampInvalid,
+			expectedValue: false,
+		}, {
+			status:        StatusRQServiceConnectionFailed,
+			expectedValue: false,
+		}, {
+			status:        StatusSymbolFileNotFound,
+			expectedValue: false,
+		}, {
+			status:        StatusSymbolFileInvalid,
+			expectedValue: false,
+		}, {
+			status:        StatusSymbolNotFound,
+			expectedValue: false,
+		}, {
+			status:        StatusSymbolMismatched,
+			expectedValue: false,
+		}, {
+			status:        StatusSymbolsNotEnough,
+			expectedValue: false,
+		}, {
+			status:        StatusFileDecodingFailed,
+			expectedValue: false,
+		}, {
+			status:        StatusFileReadingFailed,
+			expectedValue: false,
+		}, {
+			status:        StatusFileMismatched,
+			expectedValue: false,
+		}, {
+			status:        StatusFileEmpty,
+			expectedValue: false,
+		}, {
+			status:        StatusTaskCanceled,
+			expectedValue: true,
+		}, {
+			status:        StatusTaskCompleted,
+			expectedValue: true,
+		},
+	}
+
+	for _, testCase := range testCases {
+		testCase := testCase
+
+		t.Run(fmt.Sprintf("status:%v/value:%v", testCase.status, testCase.expectedValue), func(t *testing.T) {
+			t.Parallel()
+
+			value := testCase.status.IsFinal()
+			assert.Equal(t, testCase.expectedValue, value)
+		})
+	}
+}
+
+func TestStatusIsFailure(t *testing.T) {
+	t.Parallel()
+
+	testCases := []struct {
+		status        Status
+		expectedValue bool
+	}{
+		{
+			status:        StatusTaskStarted,
+			expectedValue: false,
+		}, {
+			status:        StatusFileDecoded,
+			expectedValue: false,
+		}, {
+			status:        StatusRequestTooLate,
+			expectedValue: true,
+		}, {
+			status:        StatusNftRegGettingFailed,
+			expectedValue: true,
+		}, {
+			status:        StatusNftRegDecodingFailed,
+			expectedValue: true,
+		}, {
+			status:        StatusNftRegTicketInvalid,
+			expectedValue: true,
+		}, {
+			status:        StatusListTradeTicketsFailed,
+			expectedValue: true,
+		}, {
+			status:        StatusTradeTicketsNotFound,
+			expectedValue: true,
+		}, {
+			status:        StatusTradeTicketMismatched,
+			expectedValue: true,
+		}, {
+			status:        StatusTimestampVerificationFailed,
+			expectedValue: true,
+		}, {
+			status:        StatusTimestampInvalid,
+			expectedValue: true,
+		}, {
+			status:        StatusRQServiceConnectionFailed,
+			expectedValue: true,
+		}, {
+			status:        StatusSymbolFileNotFound,
+			expectedValue: true,
+		}, {
+			status:        StatusSymbolFileInvalid,
+			expectedValue: true,
+		}, {
+			status:        StatusSymbolNotFound,
+			expectedValue: true,
+		}, {
+			status:        StatusSymbolMismatched,
+			expectedValue: true,
+		}, {
+			status:        StatusSymbolsNotEnough,
+			expectedValue: true,
+		}, {
+			status:        StatusFileDecodingFailed,
+			expectedValue: true,
+		}, {
+			status:        StatusFileReadingFailed,
+			expectedValue: true,
+		}, {
+			status:        StatusFileMismatched,
+			expectedValue: true,
+		}, {
+			status:        StatusFileEmpty,
+			expectedValue: true,
+		}, {
+			status:        StatusTaskCanceled,
+			expectedValue: true,
+		}, {
+			status:        StatusTaskCompleted,
+			expectedValue: false,
+		},
+	}
+
+	for _, testCase := range testCases {
+		testCase := testCase
+
+		t.Run(fmt.Sprintf("status:%v/value:%v", testCase.status, testCase.expectedValue), func(t *testing.T) {
+			t.Parallel()
+
+			value := testCase.status.IsFailure()
+			assert.Equal(t, testCase.expectedValue, value)
+		})
+	}
+}
diff --git a/supernode/services/common/storage_handler.go b/supernode/services/common/storage_handler.go
new file mode 100644
index 00000000..99e778da
--- /dev/null
+++ b/supernode/services/common/storage_handler.go
@@ -0,0 +1,383 @@
+package common
+
+import (
+	"context"
+	"encoding/json"
+	"fmt"
+	"io/fs"
+	"math"
+	"os"
+	"path/filepath"
+	"sort"
+	"time"
+
+	"github.com/LumeraProtocol/supernode/p2p"
+	"github.com/LumeraProtocol/supernode/pkg/errors"
+	"github.com/LumeraProtocol/supernode/pkg/log"
+	rqnode "github.com/LumeraProtocol/supernode/pkg/raptorq"
+	"github.com/LumeraProtocol/supernode/pkg/storage/files"
+	"github.com/LumeraProtocol/supernode/pkg/storage/rqstore"
+	"github.com/LumeraProtocol/supernode/pkg/utils"
+	"github.com/cenkalti/backoff"
+)
+
+const (
+	loadSymbolsBatchSize = 2500
+	storeSymbolsPercent  = 10
+	concurrency          = 1
+)
+
+// StorageHandler provides common logic for RQ and P2P operations
+type StorageHandler struct {
+	P2PClient p2p.Client
+	RqClient  rqnode.ClientInterface
+
+	rqAddress string
+	rqDir     string
+
+	TaskID string
+	TxID   string
+
+	store     rqstore.Store
+	semaphore chan struct{}
+}
+
+// NewStorageHandler creates instance of StorageHandler
+func NewStorageHandler(p2p p2p.Client, rq rqnode.ClientInterface,
+	rqAddress string, rqDir string, store rqstore.Store) *StorageHandler {
+
+	return &StorageHandler{
+		P2PClient: p2p,
+		RqClient:  rq,
+		rqAddress: rqAddress,
+		rqDir:     rqDir,
+		store:     store,
+		semaphore: make(chan struct{}, concurrency),
+	}
+}
+
+// StoreFileIntoP2P stores file into P2P
+func (h *StorageHandler) StoreFileIntoP2P(ctx context.Context, file *files.File, typ int) (string, error) {
+	data, err := file.Bytes()
+	if err != nil {
+		return "", errors.Errorf("store file %s into p2p: %w", file.Name(), err)
+	}
+	return h.StoreBytesIntoP2P(ctx, data, typ)
+}
+
+// StoreBytesIntoP2P stores the actual data into P2P
+func (h *StorageHandler) StoreBytesIntoP2P(ctx context.Context, data []byte, typ int) (string, error) {
+	return h.P2PClient.Store(ctx, data, typ)
+}
+
+// StoreBatch stores an array of byte arrays into P2P
+func (h *StorageHandler) StoreBatch(ctx context.Context, list [][]byte, typ int) error {
+	val := ctx.Value(log.TaskIDKey)
+	taskID := ""
+	if val != nil {
+		taskID = fmt.Sprintf("%v", val)
+	}
+	log.WithContext(ctx).WithField("task_id", taskID).Info("task_id in storeList")
+
+	return h.P2PClient.StoreBatch(ctx, list, typ, taskID)
+}
+
+// GenerateRaptorQSymbols calls RQ service to produce RQ Symbols
+func (h *StorageHandler) GenerateRaptorQSymbols(ctx context.Context, data []byte, name string) (map[string][]byte, error) {
+	if h.RqClient == nil {
+		log.WithContext(ctx).Warnf("RQ Server is not initialized")
+		return nil, errors.Errorf("RQ Server is not initialized")
+	}
+
+	b := backoff.NewExponentialBackOff()
+	b.MaxElapsedTime = 3 * time.Minute
+	b.InitialInterval = 200 * time.Millisecond
+
+	var conn rqnode.Connection
+	if err := backoff.Retry(backoff.Operation(func() error {
+		var err error
+		conn, err = h.RqClient.Connect(ctx, h.rqAddress)
+		if err != nil {
+			return errors.Errorf("connect to raptorq service: %w", err)
+		}
+
+		return nil
+	}), b); err != nil {
+		return nil, fmt.Errorf("retry connect to raptorq service: %w", err)
+	}
+	defer func() {
+		if err := conn.Close(); err != nil {
+			log.WithContext(ctx).WithError(err).Error("error closing rq-connection")
+		}
+	}()
+
+	rqService := conn.RaptorQ(&rqnode.Config{
+		RqFilesDir: h.rqDir,
+	})
+
+	b.Reset()
+
+	// encodeResp := &rqnode.EncodeResponse{}
+	if err := backoff.Retry(backoff.Operation(func() error {
+		var err error
+		// encodeResp, err = rqService.RQEncode(ctx, data, h.TxID, h.store)
+		_, err = rqService.Encode(ctx, rqnode.EncodeRequest{}) // FIXME : use the resp
+		// encodeResp = &encodeRes
+		if err != nil {
+			return errors.Errorf("create raptorq symbol from data %s: %w", name, err)
+		}
+
+		return nil
+	}), b); err != nil {
+		return nil, fmt.Errorf("retry do rqencode service: %w", err)
+	}
+
+	return map[string][]byte{}, nil // FIXME : return proper symbols
+}
initialized") + return nil, errors.Errorf("RQ Server is not initialized") + } + + b := backoff.NewExponentialBackOff() + b.MaxElapsedTime = 3 * time.Minute + b.InitialInterval = 500 * time.Millisecond + + var conn rqnode.Connection + if err := backoff.Retry(backoff.Operation(func() error { + var err error + conn, err = h.RqClient.Connect(ctx, h.rqAddress) + if err != nil { + return errors.Errorf("connect to raptorq service: %w", err) + } + + return nil + }), b); err != nil { + return nil, fmt.Errorf("retry connect to raptorq service: %w", err) + } + defer func() { + if err := conn.Close(); err != nil { + log.WithContext(ctx).WithError(err).Error("error closing rq-connection") + } + }() + + rqService := conn.RaptorQ(&rqnode.Config{ + RqFilesDir: h.rqDir, + }) + + b.Reset() + if err := backoff.Retry(backoff.Operation(func() error { + var err error + // encodeInfo, err = rqService.EncodeMetaData(ctx, data, num, hash, pastelID) // TODO : remove + encodeI, err := rqService.EncodeMetaData(ctx, rqnode.EncodeMetadataRequest{ + Path: "", // FIXME + FilesNumber: num, + BlockHash: hash, + PastelId: pastelID, + }) + if err != nil { + return errors.Errorf("get raptorq encode info: %w", err) + } + encodeInfo = &encodeI + return nil + }), b); err != nil { + return nil, fmt.Errorf("retry do encode info on raptorq service: %w", err) + } + + return encodeInfo, nil +} + +// ValidateRaptorQSymbolIDs calls RQ service to get Encoding info and list of RQIDs and compares them to the similar data received from WN +func (h *StorageHandler) ValidateRaptorQSymbolIDs(ctx context.Context, + data []byte, num uint32, hash string, pastelID string, + haveData []byte) error { + + if len(haveData) == 0 { + return errors.Errorf("no symbols identifiers") + } + + encodeInfo, err := h.GetRaptorQEncodeInfo(ctx, data, num, hash, pastelID) + if err != nil { + return err + } + + // scan return symbol Id files + filesMap, err := scanSymbolIDFiles(encodeInfo.Path) + if err != nil { + return errors.Errorf("scan symbol id files folder %s: %w", encodeInfo.Path, err) + } + + if len(filesMap) != int(num) { // FIXME : copies == num ? 
+ return errors.Errorf("symbol id files count not match: expect %d, output %d", num, len(filesMap)) + } + + // pick just one file generated to compare + var gotFile, haveFile rqnode.RawSymbolIDFile + for _, v := range filesMap { + gotFile = v + break + } + + if err := json.Unmarshal(haveData, &haveFile); err != nil { + return errors.Errorf("decode raw rq file: %w", err) + } + + if err := utils.EqualStrList(gotFile.SymbolIdentifiers, haveFile.SymbolIdentifiers); err != nil { + return errors.Errorf("raptor symbol mismatched: %w", err) + } + return nil +} + +// scan symbol id files in "meta" folder, return map of file Ids & contents of file (as list of line) +func scanSymbolIDFiles(dirPath string) (map[string]rqnode.RawSymbolIDFile, error) { + filesMap := make(map[string]rqnode.RawSymbolIDFile) + + err := filepath.Walk(dirPath, func(path string, info fs.FileInfo, err error) error { + if err != nil { + return errors.Errorf("scan a path %s: %w", path, err) + } + + if info.IsDir() { + // TODO - compare it to root + return nil + } + + fileID := filepath.Base(path) + + configFile, err := os.Open(path) + if err != nil { + return errors.Errorf("opening file: %s - err: %w", path, err) + } + defer configFile.Close() + + file := rqnode.RawSymbolIDFile{} + jsonParser := json.NewDecoder(configFile) + if err = jsonParser.Decode(&file); err != nil { + return errors.Errorf("parsing file: %s - err: %w", path, err) + } + + filesMap[fileID] = file + + return nil + }) + + if err != nil { + return nil, err + } + + return filesMap, nil +} + +func (h *StorageHandler) StoreRaptorQSymbolsIntoP2P(ctx context.Context, data []byte, name string) error { + h.semaphore <- struct{}{} // Acquire slot + defer func() { + <-h.semaphore // Release the semaphore slot + }() + + // Generate the keys for RaptorQ symbols, with empty values + log.WithContext(ctx).Info("generating RaptorQ symbols") + keysMap, err := h.GenerateRaptorQSymbols(ctx, data, name) + if err != nil { + return err + } + log.WithContext(ctx).WithField("count", len(keysMap)).Info("generated RaptorQ symbols") + + if h.TxID == "" { + return errors.New("txid is not set, cannot store rq symbols") + } + + dir, err := h.store.GetDirectoryByTxID(h.TxID) + if err != nil { + return fmt.Errorf("error fetching symbols dir from rq DB: %w", err) + } + + // Create a slice of keys from keysMap and sort it + keys := make([]string, 0, len(keysMap)) + for key := range keysMap { + keys = append(keys, key) + } + sort.Strings(keys) // Sort the keys alphabetically + + if len(keys) > loadSymbolsBatchSize { + // Calculate 15% of the total keys, rounded up + requiredKeysCount := int(math.Ceil(float64(len(keys)) * storeSymbolsPercent / 100)) + + // Get the subset of keys (15%) + if requiredKeysCount > len(keys) { + requiredKeysCount = len(keys) // Ensure we don't exceed the available keys count + } + keys = keys[:requiredKeysCount] + } + + // Iterate over sorted keys in batches + batchKeys := make(map[string][]byte) + count := 0 + + log.WithContext(ctx).WithField("count", len(keys)).Info("storing raptorQ symbols") + for _, key := range keys { + batchKeys[key] = nil + count++ + if count%loadSymbolsBatchSize == 0 { + if err := h.storeSymbolsInP2P(ctx, dir, batchKeys); err != nil { + return err + } + batchKeys = make(map[string][]byte) // Reset batchKeys after storing + } + } + + // Store any remaining symbols in the last batch + if len(batchKeys) > 0 { + if err := h.storeSymbolsInP2P(ctx, dir, batchKeys); err != nil { + return err + } + } + + if err := 
+
+// StoreRaptorQSymbolsIntoP2P generates RaptorQ symbols for the given data and stores them into P2P in sorted batches
+func (h *StorageHandler) StoreRaptorQSymbolsIntoP2P(ctx context.Context, data []byte, name string) error {
+	h.semaphore <- struct{}{} // Acquire a semaphore slot
+	defer func() {
+		<-h.semaphore // Release the semaphore slot
+	}()
+
+	// Generate the keys for RaptorQ symbols, with empty values
+	log.WithContext(ctx).Info("generating RaptorQ symbols")
+	keysMap, err := h.GenerateRaptorQSymbols(ctx, data, name)
+	if err != nil {
+		return err
+	}
+	log.WithContext(ctx).WithField("count", len(keysMap)).Info("generated RaptorQ symbols")
+
+	if h.TxID == "" {
+		return errors.New("txid is not set, cannot store rq symbols")
+	}
+
+	dir, err := h.store.GetDirectoryByTxID(h.TxID)
+	if err != nil {
+		return fmt.Errorf("error fetching symbols dir from rq DB: %w", err)
+	}
+
+	// Create a slice of keys from keysMap and sort it
+	keys := make([]string, 0, len(keysMap))
+	for key := range keysMap {
+		keys = append(keys, key)
+	}
+	sort.Strings(keys) // Sort the keys alphabetically
+
+	if len(keys) > loadSymbolsBatchSize {
+		// Calculate storeSymbolsPercent (10%) of the total keys, rounded up
+		requiredKeysCount := int(math.Ceil(float64(len(keys)) * storeSymbolsPercent / 100))
+
+		// Take that subset of keys
+		if requiredKeysCount > len(keys) {
+			requiredKeysCount = len(keys) // Ensure we don't exceed the available keys count
+		}
+		keys = keys[:requiredKeysCount]
+	}
+
+	// Iterate over sorted keys in batches
+	batchKeys := make(map[string][]byte)
+	count := 0
+
+	log.WithContext(ctx).WithField("count", len(keys)).Info("storing RaptorQ symbols")
+	for _, key := range keys {
+		batchKeys[key] = nil
+		count++
+		if count%loadSymbolsBatchSize == 0 {
+			if err := h.storeSymbolsInP2P(ctx, dir, batchKeys); err != nil {
+				return err
+			}
+			batchKeys = make(map[string][]byte) // Reset batchKeys after storing
+		}
+	}
+
+	// Store any remaining symbols in the last batch
+	if len(batchKeys) > 0 {
+		if err := h.storeSymbolsInP2P(ctx, dir, batchKeys); err != nil {
+			return err
+		}
+	}
+
+	if err := h.store.UpdateIsFirstBatchStored(h.TxID); err != nil {
+		return fmt.Errorf("error updating first batch stored flag in rq DB: %w", err)
+	}
+	log.WithContext(ctx).WithField("curr-time", time.Now().UTC()).WithField("count", len(keys)).Info("stored RaptorQ symbols")
+
+	return nil
+}
+
+// storeSymbolsInP2P loads a batch of symbols from disk, stores them into P2P, and deletes them afterwards
+func (h *StorageHandler) storeSymbolsInP2P(ctx context.Context, dir string, batchKeys map[string][]byte) error {
+	val := ctx.Value(log.TaskIDKey)
+	taskID := ""
+	if val != nil {
+		taskID = fmt.Sprintf("%v", val)
+	}
+	// Load symbols from the database for the current batch
+	log.WithContext(ctx).WithField("count", len(batchKeys)).Info("loading batch symbols")
+	loadedSymbols, err := utils.LoadSymbols(dir, batchKeys)
+	if err != nil {
+		return fmt.Errorf("load batch symbols from db: %w", err)
+	}
+
+	log.WithContext(ctx).WithField("count", len(loadedSymbols)).Info("loaded batch symbols, storing now")
+	// Prepare the batch for P2P storage
+	result := make([][]byte, len(loadedSymbols))
+	i := 0
+	for key, value := range loadedSymbols {
+		result[i] = value
+		loadedSymbols[key] = nil // Release the reference for faster memory cleanup
+		i++
+	}
+
+	// Store the loaded symbols in P2P
+	if err := h.P2PClient.StoreBatch(ctx, result, P2PDataRaptorQSymbol, taskID); err != nil {
+		return fmt.Errorf("store batch raptorq symbols in p2p: %w", err)
+	}
+	log.WithContext(ctx).WithField("count", len(loadedSymbols)).Info("stored batch symbols")
+
+	if err := utils.DeleteSymbols(ctx, dir, batchKeys); err != nil {
+		return fmt.Errorf("delete batch symbols from db: %w", err)
+	}
+	log.WithContext(ctx).WithField("count", len(loadedSymbols)).Info("deleted batch symbols")
+
+	return nil
+}
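The key-selection arithmetic in `StoreRaptorQSymbolsIntoP2P` is easy to misread inline: when there are more keys than `loadSymbolsBatchSize`, only the first `storeSymbolsPercent` (10%) of the sorted keys, rounded up, are stored immediately. A standalone sketch (the `loadSymbolsBatchSize` value is assumed; it is defined outside this hunk):

```go
package main

import (
	"fmt"
	"math"
	"sort"
)

const (
	loadSymbolsBatchSize = 1000 // assumed value; defined elsewhere in the diff
	storeSymbolsPercent  = 10
)

// firstBatchKeys mirrors the selection logic above: sort for deterministic
// order, then cap at ceil(10%) of the keys when over the batch-size threshold.
func firstBatchKeys(keys []string) []string {
	sort.Strings(keys)
	if len(keys) <= loadSymbolsBatchSize {
		return keys
	}
	n := int(math.Ceil(float64(len(keys)) * storeSymbolsPercent / 100))
	if n > len(keys) {
		n = len(keys)
	}
	return keys[:n]
}

func main() {
	keys := make([]string, 2500)
	for i := range keys {
		keys[i] = fmt.Sprintf("symbol-%04d", i)
	}
	fmt.Println(len(firstBatchKeys(keys))) // 250 == ceil(2500 * 10 / 100)
}
```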
diff --git a/supernode/services/common/supernode_task.go b/supernode/services/common/supernode_task.go
new file mode 100644
index 00000000..1cb663f9
--- /dev/null
+++ b/supernode/services/common/supernode_task.go
@@ -0,0 +1,64 @@
+package common
+
+import (
+	"context"
+	"fmt"
+
+	"github.com/LumeraProtocol/supernode/pkg/common/task"
+	"github.com/LumeraProtocol/supernode/pkg/common/task/state"
+	"github.com/LumeraProtocol/supernode/pkg/log"
+	"github.com/LumeraProtocol/supernode/pkg/storage/files"
+	"github.com/LumeraProtocol/supernode/pkg/storage/queries"
+)
+
+// TaskCleanerFunc is a callback that removes a task's artefacts
+type TaskCleanerFunc func()
+
+// SuperNodeTask is the base "class" for tasks
+type SuperNodeTask struct {
+	task.Task
+
+	LogPrefix string
+}
+
+// RunHelper contains the common code for a Task runner
+func (task *SuperNodeTask) RunHelper(ctx context.Context, clean TaskCleanerFunc) error {
+	ctx = task.context(ctx)
+	log.WithContext(ctx).Debug("Start task")
+	defer log.WithContext(ctx).Info("Task canceled")
+	defer task.Cancel()
+
+	task.SetStatusNotifyFunc(func(status *state.Status) {
+		log.WithContext(ctx).WithField("status", status.String()).Debug("Status updated")
+	})
+
+	defer clean()
+
+	return task.RunAction(ctx)
+}
+
+func (task *SuperNodeTask) context(ctx context.Context) context.Context {
+	return log.ContextWithPrefix(ctx, fmt.Sprintf("%s-%s", task.LogPrefix, task.ID()))
+}
+
+// RemoveFile removes a file from the FS (TODO: move to gonode.common)
+func (task *SuperNodeTask) RemoveFile(file *files.File) {
+	if file != nil {
+		log.Debugf("remove file: %s", file.Name())
+		if err := file.Remove(); err != nil {
+			log.Debugf("remove file failed: %s", err.Error())
+		}
+	}
+}
+
+// NewSuperNodeTask returns a new Task instance.
+func NewSuperNodeTask(logPrefix string, historyDB queries.LocalStoreInterface) *SuperNodeTask {
+	snt := &SuperNodeTask{
+		Task:      task.New(StatusTaskStarted),
+		LogPrefix: logPrefix,
+	}
+
+	snt.InitialiseHistoryDB(historyDB)
+
+	return snt
+}
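A hedged usage sketch of the `RunHelper` pattern above: a concrete task embeds `SuperNodeTask` and passes its cleanup routine as the `TaskCleanerFunc`. The `CascadeTask` type and `removeArtefacts` method are illustrative and not part of this diff:

```go
package cascade

import (
	"context"

	"github.com/LumeraProtocol/supernode/supernode/services/common"
)

// CascadeTask is a hypothetical task embedding the common SuperNodeTask.
type CascadeTask struct {
	*common.SuperNodeTask
}

// Run executes the task through the shared lifecycle helper: status
// notifications, deferred Cancel, and artefact cleanup are all wired up there.
func (t *CascadeTask) Run(ctx context.Context) error {
	return t.RunHelper(ctx, t.removeArtefacts)
}

// removeArtefacts is this task's TaskCleanerFunc; a real implementation
// would delete any temporary files produced while the task ran.
func (t *CascadeTask) removeArtefacts() {
	// illustrative only
}
```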