diff --git a/README.md b/README.md index 687f32b4..0aecceb2 100644 --- a/README.md +++ b/README.md @@ -15,7 +15,13 @@ service SupernodeService { rpc GetStatus(StatusRequest) returns (StatusResponse); } -message StatusRequest {} +// Optional request flags +message StatusRequest { + // When true, the response includes detailed P2P metrics and + // network peer information. This is gated to avoid heavy work + // unless explicitly requested. + bool include_p2p_metrics = 1; +} message StatusResponse { string version = 1; // Supernode version @@ -65,6 +71,68 @@ message StatusResponse { Network network = 6; // P2P network information int32 rank = 7; // Rank in the top supernodes list (0 if not in top list) string ip_address = 8; // Supernode IP address with port (e.g., "192.168.1.1:4445") + + // Optional detailed P2P metrics (present only when requested) + message P2PMetrics { + message DhtMetrics { + message StoreSuccessPoint { + int64 time_unix = 1; // event time (unix seconds) + int32 requests = 2; // total node RPCs attempted + int32 successful = 3; // successful node RPCs + double success_rate = 4; // percentage (0-100) + } + + message BatchRetrievePoint { + int64 time_unix = 1; // event time (unix seconds) + int32 keys = 2; // keys requested + int32 required = 3; // required count + int32 found_local = 4; // found locally + int32 found_network = 5; // found on network + int64 duration_ms = 6; // duration in milliseconds + } + + repeated StoreSuccessPoint store_success_recent = 1; + repeated BatchRetrievePoint batch_retrieve_recent = 2; + int64 hot_path_banned_skips = 3; + int64 hot_path_ban_increments = 4; + } + + message HandleCounters { + int64 total = 1; + int64 success = 2; + int64 failure = 3; + int64 timeout = 4; + } + + message BanEntry { + string id = 1; + string ip = 2; + uint32 port = 3; + int32 count = 4; + int64 created_at_unix = 5; + int64 age_seconds = 6; + } + + message DatabaseStats { + double p2p_db_size_mb = 1; + int64 p2p_db_records_count = 2; + } 
+ + message DiskStatus { + double all_mb = 1; + double used_mb = 2; + double free_mb = 3; + } + + DhtMetrics dht_metrics = 1; + map<string, HandleCounters> network_handle_metrics = 2; + map<string, int64> conn_pool_metrics = 3; + repeated BanEntry ban_list = 4; + DatabaseStats database = 5; + DiskStatus disk = 6; + } + + P2PMetrics p2p_metrics = 9; // Only present when include_p2p_metrics=true } ``` @@ -137,18 +205,21 @@ enum SupernodeEventType { ## HTTP Gateway -The supernode provides an HTTP gateway that exposes the gRPC services via REST API. The gateway runs on a separate port 8002 +The supernode provides an HTTP gateway that exposes the gRPC services via REST API on port `8002`. -### Endpoints +See `docs/gateway.md` for the full gateway guide and additional examples. -#### GET /api/v1/status +### Example: GET /api/v1/status Returns the current supernode status including system resources (CPU, memory, storage) and service information. +- Query parameter `include_p2p_metrics=true` enables detailed P2P metrics and peer info. +- When omitted or false, peer count, peer addresses, and `p2p_metrics` are not included. + ```bash -curl http://localhost:8002/api/v1/status +curl "http://localhost:8002/api/v1/status" ``` -Response: +Response (without P2P metrics): ```json { "version": "1.0.0", @@ -183,15 +254,49 @@ Response: } ], "registered_services": ["cascade", "sense"], + "network": {}, + "rank": 6, + "ip_address": "192.168.1.100:4445" +} +``` + +To include P2P metrics and peer information: + +```bash +curl "http://localhost:8002/api/v1/status?include_p2p_metrics=true" +``` + +Response (with P2P metrics): + +```json +{ + "version": "1.0.0", + "uptime_seconds": "3600", + "resources": { /* ... */ }, + "running_tasks": [ /* ... 
*/ ], + "registered_services": ["cascade", "sense"], "network": { "peers_count": 11, "peer_addresses": [ - "lumera13z4pkmgkr587sg6lkqnmqmqkkfpsau3rmjd5kx@156.67.29.226:4445", - "lumera1s55nzsyqsuwxsl3es0v7rxux7rypsa7zpzlqg5@18.216.80.56:4445" + "lumera13z...@156.67.29.226:4445", + "lumera1s5...@18.216.80.56:4445" ] }, "rank": 6, - "ip_address": "192.168.1.100:4445" + "ip_address": "192.168.1.100:4445", + "p2p_metrics": { + "dht_metrics": { + "store_success_recent": [ /* ... */ ], + "batch_retrieve_recent": [ /* ... */ ], + "hot_path_banned_skips": 0, + "hot_path_ban_increments": 0 + }, + "network_handle_metrics": { "STORE": {"total": 42, "success": 40, "failure": 1, "timeout": 1} }, + "conn_pool_metrics": { "active": 12, "idle": 3 }, + "ban_list": [ /* ... */ ], + "database": { "p2p_db_size_mb": 123.4, "p2p_db_records_count": "1000" }, + "disk": { "all_mb": 102400, "used_mb": 51200, "free_mb": 51200 } + } } ``` diff --git a/docs/gateway.md b/docs/gateway.md new file mode 100644 index 00000000..9f7d5be3 --- /dev/null +++ b/docs/gateway.md @@ -0,0 +1,31 @@ +# Supernode HTTP Gateway + +The HTTP gateway exposes the gRPC services via REST on port `8002` using grpc-gateway. + +## Endpoints + +### GET `/api/v1/status` +Returns supernode status: system resources (CPU, memory, storage), service info, and optionally P2P metrics. + +- Query `include_p2p_metrics=true` enables detailed P2P metrics and peer info. +- When omitted or false, peer count, peer addresses, and `p2p_metrics` are not included. + +Examples: + +```bash +# Lightweight status +curl "http://localhost:8002/api/v1/status" + +# Include P2P metrics and peer info +curl "http://localhost:8002/api/v1/status?include_p2p_metrics=true" +``` + +Example responses are shown in the main README under the SupernodeService section. 
+ +## API Documentation + +- Swagger UI: `http://localhost:8002/swagger-ui/` +- OpenAPI Spec: `http://localhost:8002/swagger.json` + +The Swagger UI provides an interactive interface to explore and test all available API endpoints. + diff --git a/gen/supernode/action/cascade/service.pb.go b/gen/supernode/action/cascade/service.pb.go index 9d97b6f2..ce3c177b 100644 --- a/gen/supernode/action/cascade/service.pb.go +++ b/gen/supernode/action/cascade/service.pb.go @@ -38,7 +38,8 @@ const ( SupernodeEventType_FINALIZE_SIMULATED SupernodeEventType = 10 SupernodeEventType_ARTEFACTS_STORED SupernodeEventType = 11 SupernodeEventType_ACTION_FINALIZED SupernodeEventType = 12 - SupernodeEventType_ARTEFACTS_DOWNLOADED SupernodeEventType = 13 + SupernodeEventType_ARTEFACTS_DOWNLOADED SupernodeEventType = 13 + SupernodeEventType_FINALIZE_SIMULATION_FAILED SupernodeEventType = 14 ) // Enum value maps for SupernodeEventType. @@ -57,7 +58,8 @@ var ( 10: "FINALIZE_SIMULATED", 11: "ARTEFACTS_STORED", 12: "ACTION_FINALIZED", - 13: "ARTEFACTS_DOWNLOADED", + 13: "ARTEFACTS_DOWNLOADED", + 14: "FINALIZE_SIMULATION_FAILED", } SupernodeEventType_value = map[string]int32{ "UNKNOWN": 0, @@ -73,7 +75,8 @@ var ( "FINALIZE_SIMULATED": 10, "ARTEFACTS_STORED": 11, "ACTION_FINALIZED": 12, - "ARTEFACTS_DOWNLOADED": 13, + "ARTEFACTS_DOWNLOADED": 13, + "FINALIZE_SIMULATION_FAILED": 14, } ) diff --git a/gen/supernode/supernode.pb.go b/gen/supernode/supernode.pb.go index 51027e4b..5410f5c6 100644 --- a/gen/supernode/supernode.pb.go +++ b/gen/supernode/supernode.pb.go @@ -7,11 +7,12 @@ package supernode import ( + reflect "reflect" + sync "sync" + _ "google.golang.org/genproto/googleapis/api/annotations" protoreflect "google.golang.org/protobuf/reflect/protoreflect" protoimpl "google.golang.org/protobuf/runtime/protoimpl" - reflect "reflect" - sync "sync" ) const ( @@ -25,6 +26,10 @@ type StatusRequest struct { state protoimpl.MessageState sizeCache protoimpl.SizeCache unknownFields 
protoimpl.UnknownFields + + // Optional: include detailed P2P metrics in the response + // Maps to query param via grpc-gateway: /api/v1/status?include_p2p_metrics=true + IncludeP2PMetrics bool `protobuf:"varint,1,opt,name=include_p2p_metrics,json=includeP2pMetrics,proto3" json:"include_p2p_metrics,omitempty"` } func (x *StatusRequest) Reset() { @@ -57,6 +62,13 @@ func (*StatusRequest) Descriptor() ([]byte, []int) { return file_supernode_supernode_proto_rawDescGZIP(), []int{0} } +func (x *StatusRequest) GetIncludeP2PMetrics() bool { + if x != nil { + return x.IncludeP2PMetrics + } + return false +} + type ListServicesRequest struct { state protoimpl.MessageState sizeCache protoimpl.SizeCache @@ -213,6 +225,7 @@ type StatusResponse struct { Network *StatusResponse_Network `protobuf:"bytes,6,opt,name=network,proto3" json:"network,omitempty"` // P2P network information Rank int32 `protobuf:"varint,7,opt,name=rank,proto3" json:"rank,omitempty"` // Rank in the top supernodes list (0 if not in top list) IpAddress string `protobuf:"bytes,8,opt,name=ip_address,json=ipAddress,proto3" json:"ip_address,omitempty"` // Supernode IP address with port (e.g., "192.168.1.1:4445") + P2PMetrics *StatusResponse_P2PMetrics `protobuf:"bytes,9,opt,name=p2p_metrics,json=p2pMetrics,proto3" json:"p2p_metrics,omitempty"` } func (x *StatusResponse) Reset() { @@ -301,6 +314,13 @@ func (x *StatusResponse) GetIpAddress() string { return "" } +func (x *StatusResponse) GetP2PMetrics() *StatusResponse_P2PMetrics { + if x != nil { + return x.P2PMetrics + } + return nil +} + // System resource information type StatusResponse_Resources struct { state protoimpl.MessageState @@ -487,6 +507,92 @@ func (x *StatusResponse_Network) GetPeerAddresses() []string { return nil } +// P2P metrics and diagnostics (additive field) +type StatusResponse_P2PMetrics struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + DhtMetrics 
*StatusResponse_P2PMetrics_DhtMetrics `protobuf:"bytes,1,opt,name=dht_metrics,json=dhtMetrics,proto3" json:"dht_metrics,omitempty"` + NetworkHandleMetrics map[string]*StatusResponse_P2PMetrics_HandleCounters `protobuf:"bytes,2,rep,name=network_handle_metrics,json=networkHandleMetrics,proto3" json:"network_handle_metrics,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` + ConnPoolMetrics map[string]int64 `protobuf:"bytes,3,rep,name=conn_pool_metrics,json=connPoolMetrics,proto3" json:"conn_pool_metrics,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"varint,2,opt,name=value,proto3"` + BanList []*StatusResponse_P2PMetrics_BanEntry `protobuf:"bytes,4,rep,name=ban_list,json=banList,proto3" json:"ban_list,omitempty"` + Database *StatusResponse_P2PMetrics_DatabaseStats `protobuf:"bytes,5,opt,name=database,proto3" json:"database,omitempty"` + Disk *StatusResponse_P2PMetrics_DiskStatus `protobuf:"bytes,6,opt,name=disk,proto3" json:"disk,omitempty"` +} + +func (x *StatusResponse_P2PMetrics) Reset() { + *x = StatusResponse_P2PMetrics{} + mi := &file_supernode_supernode_proto_msgTypes[8] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *StatusResponse_P2PMetrics) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*StatusResponse_P2PMetrics) ProtoMessage() {} + +func (x *StatusResponse_P2PMetrics) ProtoReflect() protoreflect.Message { + mi := &file_supernode_supernode_proto_msgTypes[8] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use StatusResponse_P2PMetrics.ProtoReflect.Descriptor instead. 
+func (*StatusResponse_P2PMetrics) Descriptor() ([]byte, []int) { + return file_supernode_supernode_proto_rawDescGZIP(), []int{4, 3} +} + +func (x *StatusResponse_P2PMetrics) GetDhtMetrics() *StatusResponse_P2PMetrics_DhtMetrics { + if x != nil { + return x.DhtMetrics + } + return nil +} + +func (x *StatusResponse_P2PMetrics) GetNetworkHandleMetrics() map[string]*StatusResponse_P2PMetrics_HandleCounters { + if x != nil { + return x.NetworkHandleMetrics + } + return nil +} + +func (x *StatusResponse_P2PMetrics) GetConnPoolMetrics() map[string]int64 { + if x != nil { + return x.ConnPoolMetrics + } + return nil +} + +func (x *StatusResponse_P2PMetrics) GetBanList() []*StatusResponse_P2PMetrics_BanEntry { + if x != nil { + return x.BanList + } + return nil +} + +func (x *StatusResponse_P2PMetrics) GetDatabase() *StatusResponse_P2PMetrics_DatabaseStats { + if x != nil { + return x.Database + } + return nil +} + +func (x *StatusResponse_P2PMetrics) GetDisk() *StatusResponse_P2PMetrics_DiskStatus { + if x != nil { + return x.Disk + } + return nil +} + type StatusResponse_Resources_CPU struct { state protoimpl.MessageState sizeCache protoimpl.SizeCache @@ -498,7 +604,7 @@ type StatusResponse_Resources_CPU struct { func (x *StatusResponse_Resources_CPU) Reset() { *x = StatusResponse_Resources_CPU{} - mi := &file_supernode_supernode_proto_msgTypes[8] + mi := &file_supernode_supernode_proto_msgTypes[9] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -510,7 +616,7 @@ func (x *StatusResponse_Resources_CPU) String() string { func (*StatusResponse_Resources_CPU) ProtoMessage() {} func (x *StatusResponse_Resources_CPU) ProtoReflect() protoreflect.Message { - mi := &file_supernode_supernode_proto_msgTypes[8] + mi := &file_supernode_supernode_proto_msgTypes[9] if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -553,7 +659,7 @@ type StatusResponse_Resources_Memory struct { func (x 
*StatusResponse_Resources_Memory) Reset() { *x = StatusResponse_Resources_Memory{} - mi := &file_supernode_supernode_proto_msgTypes[9] + mi := &file_supernode_supernode_proto_msgTypes[10] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -565,7 +671,7 @@ func (x *StatusResponse_Resources_Memory) String() string { func (*StatusResponse_Resources_Memory) ProtoMessage() {} func (x *StatusResponse_Resources_Memory) ProtoReflect() protoreflect.Message { - mi := &file_supernode_supernode_proto_msgTypes[9] + mi := &file_supernode_supernode_proto_msgTypes[10] if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -623,7 +729,7 @@ type StatusResponse_Resources_Storage struct { func (x *StatusResponse_Resources_Storage) Reset() { *x = StatusResponse_Resources_Storage{} - mi := &file_supernode_supernode_proto_msgTypes[10] + mi := &file_supernode_supernode_proto_msgTypes[11] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -635,7 +741,7 @@ func (x *StatusResponse_Resources_Storage) String() string { func (*StatusResponse_Resources_Storage) ProtoMessage() {} func (x *StatusResponse_Resources_Storage) ProtoReflect() protoreflect.Message { - mi := &file_supernode_supernode_proto_msgTypes[10] + mi := &file_supernode_supernode_proto_msgTypes[11] if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -686,6 +792,502 @@ func (x *StatusResponse_Resources_Storage) GetUsagePercent() float64 { return 0 } +// Rolling DHT metrics snapshot +type StatusResponse_P2PMetrics_DhtMetrics struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + StoreSuccessRecent []*StatusResponse_P2PMetrics_DhtMetrics_StoreSuccessPoint `protobuf:"bytes,1,rep,name=store_success_recent,json=storeSuccessRecent,proto3" json:"store_success_recent,omitempty"` + BatchRetrieveRecent 
[]*StatusResponse_P2PMetrics_DhtMetrics_BatchRetrievePoint `protobuf:"bytes,2,rep,name=batch_retrieve_recent,json=batchRetrieveRecent,proto3" json:"batch_retrieve_recent,omitempty"` + HotPathBannedSkips int64 `protobuf:"varint,3,opt,name=hot_path_banned_skips,json=hotPathBannedSkips,proto3" json:"hot_path_banned_skips,omitempty"` // counter + HotPathBanIncrements int64 `protobuf:"varint,4,opt,name=hot_path_ban_increments,json=hotPathBanIncrements,proto3" json:"hot_path_ban_increments,omitempty"` // counter +} + +func (x *StatusResponse_P2PMetrics_DhtMetrics) Reset() { + *x = StatusResponse_P2PMetrics_DhtMetrics{} + mi := &file_supernode_supernode_proto_msgTypes[12] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *StatusResponse_P2PMetrics_DhtMetrics) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*StatusResponse_P2PMetrics_DhtMetrics) ProtoMessage() {} + +func (x *StatusResponse_P2PMetrics_DhtMetrics) ProtoReflect() protoreflect.Message { + mi := &file_supernode_supernode_proto_msgTypes[12] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use StatusResponse_P2PMetrics_DhtMetrics.ProtoReflect.Descriptor instead. 
+func (*StatusResponse_P2PMetrics_DhtMetrics) Descriptor() ([]byte, []int) { + return file_supernode_supernode_proto_rawDescGZIP(), []int{4, 3, 0} +} + +func (x *StatusResponse_P2PMetrics_DhtMetrics) GetStoreSuccessRecent() []*StatusResponse_P2PMetrics_DhtMetrics_StoreSuccessPoint { + if x != nil { + return x.StoreSuccessRecent + } + return nil +} + +func (x *StatusResponse_P2PMetrics_DhtMetrics) GetBatchRetrieveRecent() []*StatusResponse_P2PMetrics_DhtMetrics_BatchRetrievePoint { + if x != nil { + return x.BatchRetrieveRecent + } + return nil +} + +func (x *StatusResponse_P2PMetrics_DhtMetrics) GetHotPathBannedSkips() int64 { + if x != nil { + return x.HotPathBannedSkips + } + return 0 +} + +func (x *StatusResponse_P2PMetrics_DhtMetrics) GetHotPathBanIncrements() int64 { + if x != nil { + return x.HotPathBanIncrements + } + return 0 +} + +// Per-handler counters from network layer +type StatusResponse_P2PMetrics_HandleCounters struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Total int64 `protobuf:"varint,1,opt,name=total,proto3" json:"total,omitempty"` + Success int64 `protobuf:"varint,2,opt,name=success,proto3" json:"success,omitempty"` + Failure int64 `protobuf:"varint,3,opt,name=failure,proto3" json:"failure,omitempty"` + Timeout int64 `protobuf:"varint,4,opt,name=timeout,proto3" json:"timeout,omitempty"` +} + +func (x *StatusResponse_P2PMetrics_HandleCounters) Reset() { + *x = StatusResponse_P2PMetrics_HandleCounters{} + mi := &file_supernode_supernode_proto_msgTypes[13] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *StatusResponse_P2PMetrics_HandleCounters) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*StatusResponse_P2PMetrics_HandleCounters) ProtoMessage() {} + +func (x *StatusResponse_P2PMetrics_HandleCounters) ProtoReflect() protoreflect.Message { + mi := &file_supernode_supernode_proto_msgTypes[13] + if x != nil { 
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use StatusResponse_P2PMetrics_HandleCounters.ProtoReflect.Descriptor instead. +func (*StatusResponse_P2PMetrics_HandleCounters) Descriptor() ([]byte, []int) { + return file_supernode_supernode_proto_rawDescGZIP(), []int{4, 3, 1} +} + +func (x *StatusResponse_P2PMetrics_HandleCounters) GetTotal() int64 { + if x != nil { + return x.Total + } + return 0 +} + +func (x *StatusResponse_P2PMetrics_HandleCounters) GetSuccess() int64 { + if x != nil { + return x.Success + } + return 0 +} + +func (x *StatusResponse_P2PMetrics_HandleCounters) GetFailure() int64 { + if x != nil { + return x.Failure + } + return 0 +} + +func (x *StatusResponse_P2PMetrics_HandleCounters) GetTimeout() int64 { + if x != nil { + return x.Timeout + } + return 0 +} + +// Ban list entry +type StatusResponse_P2PMetrics_BanEntry struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Id string `protobuf:"bytes,1,opt,name=id,proto3" json:"id,omitempty"` // printable ID + Ip string `protobuf:"bytes,2,opt,name=ip,proto3" json:"ip,omitempty"` // last seen IP + Port uint32 `protobuf:"varint,3,opt,name=port,proto3" json:"port,omitempty"` // last seen port + Count int32 `protobuf:"varint,4,opt,name=count,proto3" json:"count,omitempty"` // failure count + CreatedAtUnix int64 `protobuf:"varint,5,opt,name=created_at_unix,json=createdAtUnix,proto3" json:"created_at_unix,omitempty"` // first ban time (unix seconds) + AgeSeconds int64 `protobuf:"varint,6,opt,name=age_seconds,json=ageSeconds,proto3" json:"age_seconds,omitempty"` // age in seconds +} + +func (x *StatusResponse_P2PMetrics_BanEntry) Reset() { + *x = StatusResponse_P2PMetrics_BanEntry{} + mi := &file_supernode_supernode_proto_msgTypes[14] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + 
ms.StoreMessageInfo(mi) +} + +func (x *StatusResponse_P2PMetrics_BanEntry) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*StatusResponse_P2PMetrics_BanEntry) ProtoMessage() {} + +func (x *StatusResponse_P2PMetrics_BanEntry) ProtoReflect() protoreflect.Message { + mi := &file_supernode_supernode_proto_msgTypes[14] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use StatusResponse_P2PMetrics_BanEntry.ProtoReflect.Descriptor instead. +func (*StatusResponse_P2PMetrics_BanEntry) Descriptor() ([]byte, []int) { + return file_supernode_supernode_proto_rawDescGZIP(), []int{4, 3, 2} +} + +func (x *StatusResponse_P2PMetrics_BanEntry) GetId() string { + if x != nil { + return x.Id + } + return "" +} + +func (x *StatusResponse_P2PMetrics_BanEntry) GetIp() string { + if x != nil { + return x.Ip + } + return "" +} + +func (x *StatusResponse_P2PMetrics_BanEntry) GetPort() uint32 { + if x != nil { + return x.Port + } + return 0 +} + +func (x *StatusResponse_P2PMetrics_BanEntry) GetCount() int32 { + if x != nil { + return x.Count + } + return 0 +} + +func (x *StatusResponse_P2PMetrics_BanEntry) GetCreatedAtUnix() int64 { + if x != nil { + return x.CreatedAtUnix + } + return 0 +} + +func (x *StatusResponse_P2PMetrics_BanEntry) GetAgeSeconds() int64 { + if x != nil { + return x.AgeSeconds + } + return 0 +} + +// DB stats +type StatusResponse_P2PMetrics_DatabaseStats struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + P2PDbSizeMb float64 `protobuf:"fixed64,1,opt,name=p2p_db_size_mb,json=p2pDbSizeMb,proto3" json:"p2p_db_size_mb,omitempty"` + P2PDbRecordsCount int64 `protobuf:"varint,2,opt,name=p2p_db_records_count,json=p2pDbRecordsCount,proto3" json:"p2p_db_records_count,omitempty"` +} + +func (x *StatusResponse_P2PMetrics_DatabaseStats) Reset() { + 
*x = StatusResponse_P2PMetrics_DatabaseStats{} + mi := &file_supernode_supernode_proto_msgTypes[15] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *StatusResponse_P2PMetrics_DatabaseStats) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*StatusResponse_P2PMetrics_DatabaseStats) ProtoMessage() {} + +func (x *StatusResponse_P2PMetrics_DatabaseStats) ProtoReflect() protoreflect.Message { + mi := &file_supernode_supernode_proto_msgTypes[15] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use StatusResponse_P2PMetrics_DatabaseStats.ProtoReflect.Descriptor instead. +func (*StatusResponse_P2PMetrics_DatabaseStats) Descriptor() ([]byte, []int) { + return file_supernode_supernode_proto_rawDescGZIP(), []int{4, 3, 3} +} + +func (x *StatusResponse_P2PMetrics_DatabaseStats) GetP2PDbSizeMb() float64 { + if x != nil { + return x.P2PDbSizeMb + } + return 0 +} + +func (x *StatusResponse_P2PMetrics_DatabaseStats) GetP2PDbRecordsCount() int64 { + if x != nil { + return x.P2PDbRecordsCount + } + return 0 +} + +// Disk status +type StatusResponse_P2PMetrics_DiskStatus struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + AllMb float64 `protobuf:"fixed64,1,opt,name=all_mb,json=allMb,proto3" json:"all_mb,omitempty"` + UsedMb float64 `protobuf:"fixed64,2,opt,name=used_mb,json=usedMb,proto3" json:"used_mb,omitempty"` + FreeMb float64 `protobuf:"fixed64,3,opt,name=free_mb,json=freeMb,proto3" json:"free_mb,omitempty"` +} + +func (x *StatusResponse_P2PMetrics_DiskStatus) Reset() { + *x = StatusResponse_P2PMetrics_DiskStatus{} + mi := &file_supernode_supernode_proto_msgTypes[16] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *StatusResponse_P2PMetrics_DiskStatus) 
String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*StatusResponse_P2PMetrics_DiskStatus) ProtoMessage() {} + +func (x *StatusResponse_P2PMetrics_DiskStatus) ProtoReflect() protoreflect.Message { + mi := &file_supernode_supernode_proto_msgTypes[16] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use StatusResponse_P2PMetrics_DiskStatus.ProtoReflect.Descriptor instead. +func (*StatusResponse_P2PMetrics_DiskStatus) Descriptor() ([]byte, []int) { + return file_supernode_supernode_proto_rawDescGZIP(), []int{4, 3, 4} +} + +func (x *StatusResponse_P2PMetrics_DiskStatus) GetAllMb() float64 { + if x != nil { + return x.AllMb + } + return 0 +} + +func (x *StatusResponse_P2PMetrics_DiskStatus) GetUsedMb() float64 { + if x != nil { + return x.UsedMb + } + return 0 +} + +func (x *StatusResponse_P2PMetrics_DiskStatus) GetFreeMb() float64 { + if x != nil { + return x.FreeMb + } + return 0 +} + +type StatusResponse_P2PMetrics_DhtMetrics_StoreSuccessPoint struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + TimeUnix int64 `protobuf:"varint,1,opt,name=time_unix,json=timeUnix,proto3" json:"time_unix,omitempty"` // event time (unix seconds) + Requests int32 `protobuf:"varint,2,opt,name=requests,proto3" json:"requests,omitempty"` // total node RPCs attempted + Successful int32 `protobuf:"varint,3,opt,name=successful,proto3" json:"successful,omitempty"` // successful node RPCs + SuccessRate float64 `protobuf:"fixed64,4,opt,name=success_rate,json=successRate,proto3" json:"success_rate,omitempty"` // percentage (0-100) +} + +func (x *StatusResponse_P2PMetrics_DhtMetrics_StoreSuccessPoint) Reset() { + *x = StatusResponse_P2PMetrics_DhtMetrics_StoreSuccessPoint{} + mi := &file_supernode_supernode_proto_msgTypes[19] + ms := 
protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *StatusResponse_P2PMetrics_DhtMetrics_StoreSuccessPoint) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*StatusResponse_P2PMetrics_DhtMetrics_StoreSuccessPoint) ProtoMessage() {} + +func (x *StatusResponse_P2PMetrics_DhtMetrics_StoreSuccessPoint) ProtoReflect() protoreflect.Message { + mi := &file_supernode_supernode_proto_msgTypes[19] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use StatusResponse_P2PMetrics_DhtMetrics_StoreSuccessPoint.ProtoReflect.Descriptor instead. +func (*StatusResponse_P2PMetrics_DhtMetrics_StoreSuccessPoint) Descriptor() ([]byte, []int) { + return file_supernode_supernode_proto_rawDescGZIP(), []int{4, 3, 0, 0} +} + +func (x *StatusResponse_P2PMetrics_DhtMetrics_StoreSuccessPoint) GetTimeUnix() int64 { + if x != nil { + return x.TimeUnix + } + return 0 +} + +func (x *StatusResponse_P2PMetrics_DhtMetrics_StoreSuccessPoint) GetRequests() int32 { + if x != nil { + return x.Requests + } + return 0 +} + +func (x *StatusResponse_P2PMetrics_DhtMetrics_StoreSuccessPoint) GetSuccessful() int32 { + if x != nil { + return x.Successful + } + return 0 +} + +func (x *StatusResponse_P2PMetrics_DhtMetrics_StoreSuccessPoint) GetSuccessRate() float64 { + if x != nil { + return x.SuccessRate + } + return 0 +} + +type StatusResponse_P2PMetrics_DhtMetrics_BatchRetrievePoint struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + TimeUnix int64 `protobuf:"varint,1,opt,name=time_unix,json=timeUnix,proto3" json:"time_unix,omitempty"` // event time (unix seconds) + Keys int32 `protobuf:"varint,2,opt,name=keys,proto3" json:"keys,omitempty"` // keys requested + Required int32 `protobuf:"varint,3,opt,name=required,proto3" json:"required,omitempty"` 
// required count + FoundLocal int32 `protobuf:"varint,4,opt,name=found_local,json=foundLocal,proto3" json:"found_local,omitempty"` // found locally + FoundNetwork int32 `protobuf:"varint,5,opt,name=found_network,json=foundNetwork,proto3" json:"found_network,omitempty"` // found on network + DurationMs int64 `protobuf:"varint,6,opt,name=duration_ms,json=durationMs,proto3" json:"duration_ms,omitempty"` // duration in milliseconds +} + +func (x *StatusResponse_P2PMetrics_DhtMetrics_BatchRetrievePoint) Reset() { + *x = StatusResponse_P2PMetrics_DhtMetrics_BatchRetrievePoint{} + mi := &file_supernode_supernode_proto_msgTypes[20] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *StatusResponse_P2PMetrics_DhtMetrics_BatchRetrievePoint) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*StatusResponse_P2PMetrics_DhtMetrics_BatchRetrievePoint) ProtoMessage() {} + +func (x *StatusResponse_P2PMetrics_DhtMetrics_BatchRetrievePoint) ProtoReflect() protoreflect.Message { + mi := &file_supernode_supernode_proto_msgTypes[20] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use StatusResponse_P2PMetrics_DhtMetrics_BatchRetrievePoint.ProtoReflect.Descriptor instead. 
+func (*StatusResponse_P2PMetrics_DhtMetrics_BatchRetrievePoint) Descriptor() ([]byte, []int) { + return file_supernode_supernode_proto_rawDescGZIP(), []int{4, 3, 0, 1} +} + +func (x *StatusResponse_P2PMetrics_DhtMetrics_BatchRetrievePoint) GetTimeUnix() int64 { + if x != nil { + return x.TimeUnix + } + return 0 +} + +func (x *StatusResponse_P2PMetrics_DhtMetrics_BatchRetrievePoint) GetKeys() int32 { + if x != nil { + return x.Keys + } + return 0 +} + +func (x *StatusResponse_P2PMetrics_DhtMetrics_BatchRetrievePoint) GetRequired() int32 { + if x != nil { + return x.Required + } + return 0 +} + +func (x *StatusResponse_P2PMetrics_DhtMetrics_BatchRetrievePoint) GetFoundLocal() int32 { + if x != nil { + return x.FoundLocal + } + return 0 +} + +func (x *StatusResponse_P2PMetrics_DhtMetrics_BatchRetrievePoint) GetFoundNetwork() int32 { + if x != nil { + return x.FoundNetwork + } + return 0 +} + +func (x *StatusResponse_P2PMetrics_DhtMetrics_BatchRetrievePoint) GetDurationMs() int64 { + if x != nil { + return x.DurationMs + } + return 0 +} + var File_supernode_supernode_proto protoreflect.FileDescriptor var file_supernode_supernode_proto_rawDesc = []byte{ @@ -693,8 +1295,11 @@ var file_supernode_supernode_proto_rawDesc = []byte{ 0x72, 0x6e, 0x6f, 0x64, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x09, 0x73, 0x75, 0x70, 0x65, 0x72, 0x6e, 0x6f, 0x64, 0x65, 0x1a, 0x1c, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x61, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x70, - 0x72, 0x6f, 0x74, 0x6f, 0x22, 0x0f, 0x0a, 0x0d, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x52, 0x65, - 0x71, 0x75, 0x65, 0x73, 0x74, 0x22, 0x15, 0x0a, 0x13, 0x4c, 0x69, 0x73, 0x74, 0x53, 0x65, 0x72, + 0x72, 0x6f, 0x74, 0x6f, 0x22, 0x3f, 0x0a, 0x0d, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x52, 0x65, + 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x2e, 0x0a, 0x13, 0x69, 0x6e, 0x63, 0x6c, 0x75, 0x64, 0x65, + 0x5f, 0x70, 0x32, 0x70, 0x5f, 0x6d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x73, 
0x18, 0x01, 0x20, 0x01, + 0x28, 0x08, 0x52, 0x11, 0x69, 0x6e, 0x63, 0x6c, 0x75, 0x64, 0x65, 0x50, 0x32, 0x70, 0x4d, 0x65, + 0x74, 0x72, 0x69, 0x63, 0x73, 0x22, 0x15, 0x0a, 0x13, 0x4c, 0x69, 0x73, 0x74, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x22, 0x60, 0x0a, 0x14, 0x4c, 0x69, 0x73, 0x74, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x32, 0x0a, 0x08, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x73, @@ -705,7 +1310,7 @@ var file_supernode_supernode_proto_rawDesc = []byte{ 0x0a, 0x0b, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x49, 0x6e, 0x66, 0x6f, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x18, 0x0a, 0x07, 0x6d, 0x65, 0x74, 0x68, 0x6f, 0x64, 0x73, 0x18, 0x02, 0x20, 0x03, - 0x28, 0x09, 0x52, 0x07, 0x6d, 0x65, 0x74, 0x68, 0x6f, 0x64, 0x73, 0x22, 0xc7, 0x09, 0x0a, 0x0e, + 0x28, 0x09, 0x52, 0x07, 0x6d, 0x65, 0x74, 0x68, 0x6f, 0x64, 0x73, 0x22, 0x84, 0x19, 0x0a, 0x0e, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x18, 0x0a, 0x07, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x12, 0x25, 0x0a, 0x0e, 0x75, 0x70, 0x74, 0x69, @@ -730,76 +1335,200 @@ var file_supernode_supernode_proto_rawDesc = []byte{ 0x04, 0x72, 0x61, 0x6e, 0x6b, 0x18, 0x07, 0x20, 0x01, 0x28, 0x05, 0x52, 0x04, 0x72, 0x61, 0x6e, 0x6b, 0x12, 0x1d, 0x0a, 0x0a, 0x69, 0x70, 0x5f, 0x61, 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, 0x18, 0x08, 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, 0x69, 0x70, 0x41, 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, - 0x1a, 0x82, 0x05, 0x0a, 0x09, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x73, 0x12, 0x39, - 0x0a, 0x03, 0x63, 0x70, 0x75, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x27, 0x2e, 0x73, 0x75, - 0x70, 0x65, 0x72, 0x6e, 0x6f, 0x64, 0x65, 0x2e, 0x53, 0x74, 0x61, 0x74, 0x75, 
0x73, 0x52, 0x65, - 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x2e, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x73, - 0x2e, 0x43, 0x50, 0x55, 0x52, 0x03, 0x63, 0x70, 0x75, 0x12, 0x42, 0x0a, 0x06, 0x6d, 0x65, 0x6d, - 0x6f, 0x72, 0x79, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x2a, 0x2e, 0x73, 0x75, 0x70, 0x65, - 0x72, 0x6e, 0x6f, 0x64, 0x65, 0x2e, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x52, 0x65, 0x73, 0x70, - 0x6f, 0x6e, 0x73, 0x65, 0x2e, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x73, 0x2e, 0x4d, - 0x65, 0x6d, 0x6f, 0x72, 0x79, 0x52, 0x06, 0x6d, 0x65, 0x6d, 0x6f, 0x72, 0x79, 0x12, 0x54, 0x0a, - 0x0f, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x5f, 0x76, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x73, - 0x18, 0x03, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x2b, 0x2e, 0x73, 0x75, 0x70, 0x65, 0x72, 0x6e, 0x6f, + 0x12, 0x45, 0x0a, 0x0b, 0x70, 0x32, 0x70, 0x5f, 0x6d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x73, 0x18, + 0x09, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x24, 0x2e, 0x73, 0x75, 0x70, 0x65, 0x72, 0x6e, 0x6f, 0x64, + 0x65, 0x2e, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, + 0x2e, 0x50, 0x32, 0x50, 0x4d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x73, 0x52, 0x0a, 0x70, 0x32, 0x70, + 0x4d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x73, 0x1a, 0x82, 0x05, 0x0a, 0x09, 0x52, 0x65, 0x73, 0x6f, + 0x75, 0x72, 0x63, 0x65, 0x73, 0x12, 0x39, 0x0a, 0x03, 0x63, 0x70, 0x75, 0x18, 0x01, 0x20, 0x01, + 0x28, 0x0b, 0x32, 0x27, 0x2e, 0x73, 0x75, 0x70, 0x65, 0x72, 0x6e, 0x6f, 0x64, 0x65, 0x2e, 0x53, + 0x74, 0x61, 0x74, 0x75, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x2e, 0x52, 0x65, + 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x73, 0x2e, 0x43, 0x50, 0x55, 0x52, 0x03, 0x63, 0x70, 0x75, + 0x12, 0x42, 0x0a, 0x06, 0x6d, 0x65, 0x6d, 0x6f, 0x72, 0x79, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, + 0x32, 0x2a, 0x2e, 0x73, 0x75, 0x70, 0x65, 0x72, 0x6e, 0x6f, 0x64, 0x65, 0x2e, 0x53, 0x74, 0x61, + 0x74, 0x75, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x2e, 0x52, 0x65, 0x73, 0x6f, + 0x75, 0x72, 0x63, 
0x65, 0x73, 0x2e, 0x4d, 0x65, 0x6d, 0x6f, 0x72, 0x79, 0x52, 0x06, 0x6d, 0x65, + 0x6d, 0x6f, 0x72, 0x79, 0x12, 0x54, 0x0a, 0x0f, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x5f, + 0x76, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x73, 0x18, 0x03, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x2b, 0x2e, + 0x73, 0x75, 0x70, 0x65, 0x72, 0x6e, 0x6f, 0x64, 0x65, 0x2e, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, + 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x2e, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, + 0x65, 0x73, 0x2e, 0x53, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x52, 0x0e, 0x73, 0x74, 0x6f, 0x72, + 0x61, 0x67, 0x65, 0x56, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x73, 0x12, 0x29, 0x0a, 0x10, 0x68, 0x61, + 0x72, 0x64, 0x77, 0x61, 0x72, 0x65, 0x5f, 0x73, 0x75, 0x6d, 0x6d, 0x61, 0x72, 0x79, 0x18, 0x04, + 0x20, 0x01, 0x28, 0x09, 0x52, 0x0f, 0x68, 0x61, 0x72, 0x64, 0x77, 0x61, 0x72, 0x65, 0x53, 0x75, + 0x6d, 0x6d, 0x61, 0x72, 0x79, 0x1a, 0x40, 0x0a, 0x03, 0x43, 0x50, 0x55, 0x12, 0x23, 0x0a, 0x0d, + 0x75, 0x73, 0x61, 0x67, 0x65, 0x5f, 0x70, 0x65, 0x72, 0x63, 0x65, 0x6e, 0x74, 0x18, 0x01, 0x20, + 0x01, 0x28, 0x01, 0x52, 0x0c, 0x75, 0x73, 0x61, 0x67, 0x65, 0x50, 0x65, 0x72, 0x63, 0x65, 0x6e, + 0x74, 0x12, 0x14, 0x0a, 0x05, 0x63, 0x6f, 0x72, 0x65, 0x73, 0x18, 0x02, 0x20, 0x01, 0x28, 0x05, + 0x52, 0x05, 0x63, 0x6f, 0x72, 0x65, 0x73, 0x1a, 0x84, 0x01, 0x0a, 0x06, 0x4d, 0x65, 0x6d, 0x6f, + 0x72, 0x79, 0x12, 0x19, 0x0a, 0x08, 0x74, 0x6f, 0x74, 0x61, 0x6c, 0x5f, 0x67, 0x62, 0x18, 0x01, + 0x20, 0x01, 0x28, 0x01, 0x52, 0x07, 0x74, 0x6f, 0x74, 0x61, 0x6c, 0x47, 0x62, 0x12, 0x17, 0x0a, + 0x07, 0x75, 0x73, 0x65, 0x64, 0x5f, 0x67, 0x62, 0x18, 0x02, 0x20, 0x01, 0x28, 0x01, 0x52, 0x06, + 0x75, 0x73, 0x65, 0x64, 0x47, 0x62, 0x12, 0x21, 0x0a, 0x0c, 0x61, 0x76, 0x61, 0x69, 0x6c, 0x61, + 0x62, 0x6c, 0x65, 0x5f, 0x67, 0x62, 0x18, 0x03, 0x20, 0x01, 0x28, 0x01, 0x52, 0x0b, 0x61, 0x76, + 0x61, 0x69, 0x6c, 0x61, 0x62, 0x6c, 0x65, 0x47, 0x62, 0x12, 0x23, 0x0a, 0x0d, 0x75, 0x73, 0x61, + 0x67, 0x65, 0x5f, 0x70, 0x65, 0x72, 0x63, 0x65, 0x6e, 
0x74, 0x18, 0x04, 0x20, 0x01, 0x28, 0x01, + 0x52, 0x0c, 0x75, 0x73, 0x61, 0x67, 0x65, 0x50, 0x65, 0x72, 0x63, 0x65, 0x6e, 0x74, 0x1a, 0xab, + 0x01, 0x0a, 0x07, 0x53, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x12, 0x12, 0x0a, 0x04, 0x70, 0x61, + 0x74, 0x68, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x70, 0x61, 0x74, 0x68, 0x12, 0x1f, + 0x0a, 0x0b, 0x74, 0x6f, 0x74, 0x61, 0x6c, 0x5f, 0x62, 0x79, 0x74, 0x65, 0x73, 0x18, 0x02, 0x20, + 0x01, 0x28, 0x04, 0x52, 0x0a, 0x74, 0x6f, 0x74, 0x61, 0x6c, 0x42, 0x79, 0x74, 0x65, 0x73, 0x12, + 0x1d, 0x0a, 0x0a, 0x75, 0x73, 0x65, 0x64, 0x5f, 0x62, 0x79, 0x74, 0x65, 0x73, 0x18, 0x03, 0x20, + 0x01, 0x28, 0x04, 0x52, 0x09, 0x75, 0x73, 0x65, 0x64, 0x42, 0x79, 0x74, 0x65, 0x73, 0x12, 0x27, + 0x0a, 0x0f, 0x61, 0x76, 0x61, 0x69, 0x6c, 0x61, 0x62, 0x6c, 0x65, 0x5f, 0x62, 0x79, 0x74, 0x65, + 0x73, 0x18, 0x04, 0x20, 0x01, 0x28, 0x04, 0x52, 0x0e, 0x61, 0x76, 0x61, 0x69, 0x6c, 0x61, 0x62, + 0x6c, 0x65, 0x42, 0x79, 0x74, 0x65, 0x73, 0x12, 0x23, 0x0a, 0x0d, 0x75, 0x73, 0x61, 0x67, 0x65, + 0x5f, 0x70, 0x65, 0x72, 0x63, 0x65, 0x6e, 0x74, 0x18, 0x05, 0x20, 0x01, 0x28, 0x01, 0x52, 0x0c, + 0x75, 0x73, 0x61, 0x67, 0x65, 0x50, 0x65, 0x72, 0x63, 0x65, 0x6e, 0x74, 0x1a, 0x6b, 0x0a, 0x0c, + 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x54, 0x61, 0x73, 0x6b, 0x73, 0x12, 0x21, 0x0a, 0x0c, + 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, + 0x28, 0x09, 0x52, 0x0b, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x4e, 0x61, 0x6d, 0x65, 0x12, + 0x19, 0x0a, 0x08, 0x74, 0x61, 0x73, 0x6b, 0x5f, 0x69, 0x64, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, + 0x09, 0x52, 0x07, 0x74, 0x61, 0x73, 0x6b, 0x49, 0x64, 0x73, 0x12, 0x1d, 0x0a, 0x0a, 0x74, 0x61, + 0x73, 0x6b, 0x5f, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x18, 0x03, 0x20, 0x01, 0x28, 0x05, 0x52, 0x09, + 0x74, 0x61, 0x73, 0x6b, 0x43, 0x6f, 0x75, 0x6e, 0x74, 0x1a, 0x51, 0x0a, 0x07, 0x4e, 0x65, 0x74, + 0x77, 0x6f, 0x72, 0x6b, 0x12, 0x1f, 0x0a, 0x0b, 0x70, 0x65, 0x65, 0x72, 0x73, 0x5f, 0x63, 
0x6f, + 0x75, 0x6e, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x05, 0x52, 0x0a, 0x70, 0x65, 0x65, 0x72, 0x73, + 0x43, 0x6f, 0x75, 0x6e, 0x74, 0x12, 0x25, 0x0a, 0x0e, 0x70, 0x65, 0x65, 0x72, 0x5f, 0x61, 0x64, + 0x64, 0x72, 0x65, 0x73, 0x73, 0x65, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x09, 0x52, 0x0d, 0x70, + 0x65, 0x65, 0x72, 0x41, 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, 0x65, 0x73, 0x1a, 0xf3, 0x0e, 0x0a, + 0x0a, 0x50, 0x32, 0x50, 0x4d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x73, 0x12, 0x50, 0x0a, 0x0b, 0x64, + 0x68, 0x74, 0x5f, 0x6d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, + 0x32, 0x2f, 0x2e, 0x73, 0x75, 0x70, 0x65, 0x72, 0x6e, 0x6f, 0x64, 0x65, 0x2e, 0x53, 0x74, 0x61, + 0x74, 0x75, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x2e, 0x50, 0x32, 0x50, 0x4d, + 0x65, 0x74, 0x72, 0x69, 0x63, 0x73, 0x2e, 0x44, 0x68, 0x74, 0x4d, 0x65, 0x74, 0x72, 0x69, 0x63, + 0x73, 0x52, 0x0a, 0x64, 0x68, 0x74, 0x4d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x73, 0x12, 0x74, 0x0a, + 0x16, 0x6e, 0x65, 0x74, 0x77, 0x6f, 0x72, 0x6b, 0x5f, 0x68, 0x61, 0x6e, 0x64, 0x6c, 0x65, 0x5f, + 0x6d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x3e, 0x2e, + 0x73, 0x75, 0x70, 0x65, 0x72, 0x6e, 0x6f, 0x64, 0x65, 0x2e, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, + 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x2e, 0x50, 0x32, 0x50, 0x4d, 0x65, 0x74, 0x72, + 0x69, 0x63, 0x73, 0x2e, 0x4e, 0x65, 0x74, 0x77, 0x6f, 0x72, 0x6b, 0x48, 0x61, 0x6e, 0x64, 0x6c, + 0x65, 0x4d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x14, 0x6e, + 0x65, 0x74, 0x77, 0x6f, 0x72, 0x6b, 0x48, 0x61, 0x6e, 0x64, 0x6c, 0x65, 0x4d, 0x65, 0x74, 0x72, + 0x69, 0x63, 0x73, 0x12, 0x65, 0x0a, 0x11, 0x63, 0x6f, 0x6e, 0x6e, 0x5f, 0x70, 0x6f, 0x6f, 0x6c, + 0x5f, 0x6d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x73, 0x18, 0x03, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x39, + 0x2e, 0x73, 0x75, 0x70, 0x65, 0x72, 0x6e, 0x6f, 0x64, 0x65, 0x2e, 0x53, 0x74, 0x61, 0x74, 0x75, + 0x73, 0x52, 0x65, 0x73, 0x70, 
0x6f, 0x6e, 0x73, 0x65, 0x2e, 0x50, 0x32, 0x50, 0x4d, 0x65, 0x74, + 0x72, 0x69, 0x63, 0x73, 0x2e, 0x43, 0x6f, 0x6e, 0x6e, 0x50, 0x6f, 0x6f, 0x6c, 0x4d, 0x65, 0x74, + 0x72, 0x69, 0x63, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x0f, 0x63, 0x6f, 0x6e, 0x6e, 0x50, + 0x6f, 0x6f, 0x6c, 0x4d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x73, 0x12, 0x48, 0x0a, 0x08, 0x62, 0x61, + 0x6e, 0x5f, 0x6c, 0x69, 0x73, 0x74, 0x18, 0x04, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x2d, 0x2e, 0x73, + 0x75, 0x70, 0x65, 0x72, 0x6e, 0x6f, 0x64, 0x65, 0x2e, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x52, + 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x2e, 0x50, 0x32, 0x50, 0x4d, 0x65, 0x74, 0x72, 0x69, + 0x63, 0x73, 0x2e, 0x42, 0x61, 0x6e, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x07, 0x62, 0x61, 0x6e, + 0x4c, 0x69, 0x73, 0x74, 0x12, 0x4e, 0x0a, 0x08, 0x64, 0x61, 0x74, 0x61, 0x62, 0x61, 0x73, 0x65, + 0x18, 0x05, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x32, 0x2e, 0x73, 0x75, 0x70, 0x65, 0x72, 0x6e, 0x6f, + 0x64, 0x65, 0x2e, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, + 0x65, 0x2e, 0x50, 0x32, 0x50, 0x4d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x73, 0x2e, 0x44, 0x61, 0x74, + 0x61, 0x62, 0x61, 0x73, 0x65, 0x53, 0x74, 0x61, 0x74, 0x73, 0x52, 0x08, 0x64, 0x61, 0x74, 0x61, + 0x62, 0x61, 0x73, 0x65, 0x12, 0x43, 0x0a, 0x04, 0x64, 0x69, 0x73, 0x6b, 0x18, 0x06, 0x20, 0x01, + 0x28, 0x0b, 0x32, 0x2f, 0x2e, 0x73, 0x75, 0x70, 0x65, 0x72, 0x6e, 0x6f, 0x64, 0x65, 0x2e, 0x53, + 0x74, 0x61, 0x74, 0x75, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x2e, 0x50, 0x32, + 0x50, 0x4d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x73, 0x2e, 0x44, 0x69, 0x73, 0x6b, 0x53, 0x74, 0x61, + 0x74, 0x75, 0x73, 0x52, 0x04, 0x64, 0x69, 0x73, 0x6b, 0x1a, 0xc0, 0x05, 0x0a, 0x0a, 0x44, 0x68, + 0x74, 0x4d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x73, 0x12, 0x73, 0x0a, 0x14, 0x73, 0x74, 0x6f, 0x72, + 0x65, 0x5f, 0x73, 0x75, 0x63, 0x63, 0x65, 0x73, 0x73, 0x5f, 0x72, 0x65, 0x63, 0x65, 0x6e, 0x74, + 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x41, 0x2e, 0x73, 0x75, 
0x70, 0x65, 0x72, 0x6e, 0x6f, 0x64, 0x65, 0x2e, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, - 0x65, 0x2e, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x73, 0x2e, 0x53, 0x74, 0x6f, 0x72, - 0x61, 0x67, 0x65, 0x52, 0x0e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x56, 0x6f, 0x6c, 0x75, - 0x6d, 0x65, 0x73, 0x12, 0x29, 0x0a, 0x10, 0x68, 0x61, 0x72, 0x64, 0x77, 0x61, 0x72, 0x65, 0x5f, - 0x73, 0x75, 0x6d, 0x6d, 0x61, 0x72, 0x79, 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0f, 0x68, - 0x61, 0x72, 0x64, 0x77, 0x61, 0x72, 0x65, 0x53, 0x75, 0x6d, 0x6d, 0x61, 0x72, 0x79, 0x1a, 0x40, - 0x0a, 0x03, 0x43, 0x50, 0x55, 0x12, 0x23, 0x0a, 0x0d, 0x75, 0x73, 0x61, 0x67, 0x65, 0x5f, 0x70, - 0x65, 0x72, 0x63, 0x65, 0x6e, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x01, 0x52, 0x0c, 0x75, 0x73, - 0x61, 0x67, 0x65, 0x50, 0x65, 0x72, 0x63, 0x65, 0x6e, 0x74, 0x12, 0x14, 0x0a, 0x05, 0x63, 0x6f, - 0x72, 0x65, 0x73, 0x18, 0x02, 0x20, 0x01, 0x28, 0x05, 0x52, 0x05, 0x63, 0x6f, 0x72, 0x65, 0x73, - 0x1a, 0x84, 0x01, 0x0a, 0x06, 0x4d, 0x65, 0x6d, 0x6f, 0x72, 0x79, 0x12, 0x19, 0x0a, 0x08, 0x74, - 0x6f, 0x74, 0x61, 0x6c, 0x5f, 0x67, 0x62, 0x18, 0x01, 0x20, 0x01, 0x28, 0x01, 0x52, 0x07, 0x74, - 0x6f, 0x74, 0x61, 0x6c, 0x47, 0x62, 0x12, 0x17, 0x0a, 0x07, 0x75, 0x73, 0x65, 0x64, 0x5f, 0x67, - 0x62, 0x18, 0x02, 0x20, 0x01, 0x28, 0x01, 0x52, 0x06, 0x75, 0x73, 0x65, 0x64, 0x47, 0x62, 0x12, - 0x21, 0x0a, 0x0c, 0x61, 0x76, 0x61, 0x69, 0x6c, 0x61, 0x62, 0x6c, 0x65, 0x5f, 0x67, 0x62, 0x18, - 0x03, 0x20, 0x01, 0x28, 0x01, 0x52, 0x0b, 0x61, 0x76, 0x61, 0x69, 0x6c, 0x61, 0x62, 0x6c, 0x65, - 0x47, 0x62, 0x12, 0x23, 0x0a, 0x0d, 0x75, 0x73, 0x61, 0x67, 0x65, 0x5f, 0x70, 0x65, 0x72, 0x63, - 0x65, 0x6e, 0x74, 0x18, 0x04, 0x20, 0x01, 0x28, 0x01, 0x52, 0x0c, 0x75, 0x73, 0x61, 0x67, 0x65, - 0x50, 0x65, 0x72, 0x63, 0x65, 0x6e, 0x74, 0x1a, 0xab, 0x01, 0x0a, 0x07, 0x53, 0x74, 0x6f, 0x72, - 0x61, 0x67, 0x65, 0x12, 0x12, 0x0a, 0x04, 0x70, 0x61, 0x74, 0x68, 0x18, 0x01, 0x20, 0x01, 0x28, - 0x09, 
0x52, 0x04, 0x70, 0x61, 0x74, 0x68, 0x12, 0x1f, 0x0a, 0x0b, 0x74, 0x6f, 0x74, 0x61, 0x6c, - 0x5f, 0x62, 0x79, 0x74, 0x65, 0x73, 0x18, 0x02, 0x20, 0x01, 0x28, 0x04, 0x52, 0x0a, 0x74, 0x6f, - 0x74, 0x61, 0x6c, 0x42, 0x79, 0x74, 0x65, 0x73, 0x12, 0x1d, 0x0a, 0x0a, 0x75, 0x73, 0x65, 0x64, - 0x5f, 0x62, 0x79, 0x74, 0x65, 0x73, 0x18, 0x03, 0x20, 0x01, 0x28, 0x04, 0x52, 0x09, 0x75, 0x73, - 0x65, 0x64, 0x42, 0x79, 0x74, 0x65, 0x73, 0x12, 0x27, 0x0a, 0x0f, 0x61, 0x76, 0x61, 0x69, 0x6c, - 0x61, 0x62, 0x6c, 0x65, 0x5f, 0x62, 0x79, 0x74, 0x65, 0x73, 0x18, 0x04, 0x20, 0x01, 0x28, 0x04, - 0x52, 0x0e, 0x61, 0x76, 0x61, 0x69, 0x6c, 0x61, 0x62, 0x6c, 0x65, 0x42, 0x79, 0x74, 0x65, 0x73, - 0x12, 0x23, 0x0a, 0x0d, 0x75, 0x73, 0x61, 0x67, 0x65, 0x5f, 0x70, 0x65, 0x72, 0x63, 0x65, 0x6e, - 0x74, 0x18, 0x05, 0x20, 0x01, 0x28, 0x01, 0x52, 0x0c, 0x75, 0x73, 0x61, 0x67, 0x65, 0x50, 0x65, - 0x72, 0x63, 0x65, 0x6e, 0x74, 0x1a, 0x6b, 0x0a, 0x0c, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, - 0x54, 0x61, 0x73, 0x6b, 0x73, 0x12, 0x21, 0x0a, 0x0c, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, - 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x73, 0x65, 0x72, - 0x76, 0x69, 0x63, 0x65, 0x4e, 0x61, 0x6d, 0x65, 0x12, 0x19, 0x0a, 0x08, 0x74, 0x61, 0x73, 0x6b, - 0x5f, 0x69, 0x64, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x09, 0x52, 0x07, 0x74, 0x61, 0x73, 0x6b, - 0x49, 0x64, 0x73, 0x12, 0x1d, 0x0a, 0x0a, 0x74, 0x61, 0x73, 0x6b, 0x5f, 0x63, 0x6f, 0x75, 0x6e, - 0x74, 0x18, 0x03, 0x20, 0x01, 0x28, 0x05, 0x52, 0x09, 0x74, 0x61, 0x73, 0x6b, 0x43, 0x6f, 0x75, - 0x6e, 0x74, 0x1a, 0x51, 0x0a, 0x07, 0x4e, 0x65, 0x74, 0x77, 0x6f, 0x72, 0x6b, 0x12, 0x1f, 0x0a, - 0x0b, 0x70, 0x65, 0x65, 0x72, 0x73, 0x5f, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x18, 0x01, 0x20, 0x01, - 0x28, 0x05, 0x52, 0x0a, 0x70, 0x65, 0x65, 0x72, 0x73, 0x43, 0x6f, 0x75, 0x6e, 0x74, 0x12, 0x25, - 0x0a, 0x0e, 0x70, 0x65, 0x65, 0x72, 0x5f, 0x61, 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, 0x65, 0x73, - 0x18, 0x02, 0x20, 0x03, 0x28, 0x09, 0x52, 
0x0d, 0x70, 0x65, 0x65, 0x72, 0x41, 0x64, 0x64, 0x72, - 0x65, 0x73, 0x73, 0x65, 0x73, 0x32, 0xd7, 0x01, 0x0a, 0x10, 0x53, 0x75, 0x70, 0x65, 0x72, 0x6e, - 0x6f, 0x64, 0x65, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x12, 0x58, 0x0a, 0x09, 0x47, 0x65, - 0x74, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x12, 0x18, 0x2e, 0x73, 0x75, 0x70, 0x65, 0x72, 0x6e, - 0x6f, 0x64, 0x65, 0x2e, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, - 0x74, 0x1a, 0x19, 0x2e, 0x73, 0x75, 0x70, 0x65, 0x72, 0x6e, 0x6f, 0x64, 0x65, 0x2e, 0x53, 0x74, - 0x61, 0x74, 0x75, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x16, 0x82, 0xd3, - 0xe4, 0x93, 0x02, 0x10, 0x12, 0x0e, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x76, 0x31, 0x2f, 0x73, 0x74, - 0x61, 0x74, 0x75, 0x73, 0x12, 0x69, 0x0a, 0x0c, 0x4c, 0x69, 0x73, 0x74, 0x53, 0x65, 0x72, 0x76, - 0x69, 0x63, 0x65, 0x73, 0x12, 0x1e, 0x2e, 0x73, 0x75, 0x70, 0x65, 0x72, 0x6e, 0x6f, 0x64, 0x65, - 0x2e, 0x4c, 0x69, 0x73, 0x74, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x73, 0x52, 0x65, 0x71, - 0x75, 0x65, 0x73, 0x74, 0x1a, 0x1f, 0x2e, 0x73, 0x75, 0x70, 0x65, 0x72, 0x6e, 0x6f, 0x64, 0x65, - 0x2e, 0x4c, 0x69, 0x73, 0x74, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x73, 0x52, 0x65, 0x73, - 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x18, 0x82, 0xd3, 0xe4, 0x93, 0x02, 0x12, 0x12, 0x10, 0x2f, - 0x61, 0x70, 0x69, 0x2f, 0x76, 0x31, 0x2f, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x73, 0x42, - 0x36, 0x5a, 0x34, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x4c, 0x75, - 0x6d, 0x65, 0x72, 0x61, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x6f, 0x6c, 0x2f, 0x73, 0x75, 0x70, - 0x65, 0x72, 0x6e, 0x6f, 0x64, 0x65, 0x2f, 0x76, 0x32, 0x2f, 0x67, 0x65, 0x6e, 0x2f, 0x73, 0x75, - 0x70, 0x65, 0x72, 0x6e, 0x6f, 0x64, 0x65, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, + 0x65, 0x2e, 0x50, 0x32, 0x50, 0x4d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x73, 0x2e, 0x44, 0x68, 0x74, + 0x4d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x73, 0x2e, 0x53, 0x74, 0x6f, 0x72, 0x65, 0x53, 
0x75, 0x63, + 0x63, 0x65, 0x73, 0x73, 0x50, 0x6f, 0x69, 0x6e, 0x74, 0x52, 0x12, 0x73, 0x74, 0x6f, 0x72, 0x65, + 0x53, 0x75, 0x63, 0x63, 0x65, 0x73, 0x73, 0x52, 0x65, 0x63, 0x65, 0x6e, 0x74, 0x12, 0x76, 0x0a, + 0x15, 0x62, 0x61, 0x74, 0x63, 0x68, 0x5f, 0x72, 0x65, 0x74, 0x72, 0x69, 0x65, 0x76, 0x65, 0x5f, + 0x72, 0x65, 0x63, 0x65, 0x6e, 0x74, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x42, 0x2e, 0x73, + 0x75, 0x70, 0x65, 0x72, 0x6e, 0x6f, 0x64, 0x65, 0x2e, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x52, + 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x2e, 0x50, 0x32, 0x50, 0x4d, 0x65, 0x74, 0x72, 0x69, + 0x63, 0x73, 0x2e, 0x44, 0x68, 0x74, 0x4d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x73, 0x2e, 0x42, 0x61, + 0x74, 0x63, 0x68, 0x52, 0x65, 0x74, 0x72, 0x69, 0x65, 0x76, 0x65, 0x50, 0x6f, 0x69, 0x6e, 0x74, + 0x52, 0x13, 0x62, 0x61, 0x74, 0x63, 0x68, 0x52, 0x65, 0x74, 0x72, 0x69, 0x65, 0x76, 0x65, 0x52, + 0x65, 0x63, 0x65, 0x6e, 0x74, 0x12, 0x31, 0x0a, 0x15, 0x68, 0x6f, 0x74, 0x5f, 0x70, 0x61, 0x74, + 0x68, 0x5f, 0x62, 0x61, 0x6e, 0x6e, 0x65, 0x64, 0x5f, 0x73, 0x6b, 0x69, 0x70, 0x73, 0x18, 0x03, + 0x20, 0x01, 0x28, 0x03, 0x52, 0x12, 0x68, 0x6f, 0x74, 0x50, 0x61, 0x74, 0x68, 0x42, 0x61, 0x6e, + 0x6e, 0x65, 0x64, 0x53, 0x6b, 0x69, 0x70, 0x73, 0x12, 0x35, 0x0a, 0x17, 0x68, 0x6f, 0x74, 0x5f, + 0x70, 0x61, 0x74, 0x68, 0x5f, 0x62, 0x61, 0x6e, 0x5f, 0x69, 0x6e, 0x63, 0x72, 0x65, 0x6d, 0x65, + 0x6e, 0x74, 0x73, 0x18, 0x04, 0x20, 0x01, 0x28, 0x03, 0x52, 0x14, 0x68, 0x6f, 0x74, 0x50, 0x61, + 0x74, 0x68, 0x42, 0x61, 0x6e, 0x49, 0x6e, 0x63, 0x72, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x73, 0x1a, + 0x8f, 0x01, 0x0a, 0x11, 0x53, 0x74, 0x6f, 0x72, 0x65, 0x53, 0x75, 0x63, 0x63, 0x65, 0x73, 0x73, + 0x50, 0x6f, 0x69, 0x6e, 0x74, 0x12, 0x1b, 0x0a, 0x09, 0x74, 0x69, 0x6d, 0x65, 0x5f, 0x75, 0x6e, + 0x69, 0x78, 0x18, 0x01, 0x20, 0x01, 0x28, 0x03, 0x52, 0x08, 0x74, 0x69, 0x6d, 0x65, 0x55, 0x6e, + 0x69, 0x78, 0x12, 0x1a, 0x0a, 0x08, 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x73, 0x18, 0x02, + 0x20, 0x01, 0x28, 0x05, 
0x52, 0x08, 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x73, 0x12, 0x1e, + 0x0a, 0x0a, 0x73, 0x75, 0x63, 0x63, 0x65, 0x73, 0x73, 0x66, 0x75, 0x6c, 0x18, 0x03, 0x20, 0x01, + 0x28, 0x05, 0x52, 0x0a, 0x73, 0x75, 0x63, 0x63, 0x65, 0x73, 0x73, 0x66, 0x75, 0x6c, 0x12, 0x21, + 0x0a, 0x0c, 0x73, 0x75, 0x63, 0x63, 0x65, 0x73, 0x73, 0x5f, 0x72, 0x61, 0x74, 0x65, 0x18, 0x04, + 0x20, 0x01, 0x28, 0x01, 0x52, 0x0b, 0x73, 0x75, 0x63, 0x63, 0x65, 0x73, 0x73, 0x52, 0x61, 0x74, + 0x65, 0x1a, 0xc8, 0x01, 0x0a, 0x12, 0x42, 0x61, 0x74, 0x63, 0x68, 0x52, 0x65, 0x74, 0x72, 0x69, + 0x65, 0x76, 0x65, 0x50, 0x6f, 0x69, 0x6e, 0x74, 0x12, 0x1b, 0x0a, 0x09, 0x74, 0x69, 0x6d, 0x65, + 0x5f, 0x75, 0x6e, 0x69, 0x78, 0x18, 0x01, 0x20, 0x01, 0x28, 0x03, 0x52, 0x08, 0x74, 0x69, 0x6d, + 0x65, 0x55, 0x6e, 0x69, 0x78, 0x12, 0x12, 0x0a, 0x04, 0x6b, 0x65, 0x79, 0x73, 0x18, 0x02, 0x20, + 0x01, 0x28, 0x05, 0x52, 0x04, 0x6b, 0x65, 0x79, 0x73, 0x12, 0x1a, 0x0a, 0x08, 0x72, 0x65, 0x71, + 0x75, 0x69, 0x72, 0x65, 0x64, 0x18, 0x03, 0x20, 0x01, 0x28, 0x05, 0x52, 0x08, 0x72, 0x65, 0x71, + 0x75, 0x69, 0x72, 0x65, 0x64, 0x12, 0x1f, 0x0a, 0x0b, 0x66, 0x6f, 0x75, 0x6e, 0x64, 0x5f, 0x6c, + 0x6f, 0x63, 0x61, 0x6c, 0x18, 0x04, 0x20, 0x01, 0x28, 0x05, 0x52, 0x0a, 0x66, 0x6f, 0x75, 0x6e, + 0x64, 0x4c, 0x6f, 0x63, 0x61, 0x6c, 0x12, 0x23, 0x0a, 0x0d, 0x66, 0x6f, 0x75, 0x6e, 0x64, 0x5f, + 0x6e, 0x65, 0x74, 0x77, 0x6f, 0x72, 0x6b, 0x18, 0x05, 0x20, 0x01, 0x28, 0x05, 0x52, 0x0c, 0x66, + 0x6f, 0x75, 0x6e, 0x64, 0x4e, 0x65, 0x74, 0x77, 0x6f, 0x72, 0x6b, 0x12, 0x1f, 0x0a, 0x0b, 0x64, + 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6d, 0x73, 0x18, 0x06, 0x20, 0x01, 0x28, 0x03, + 0x52, 0x0a, 0x64, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x4d, 0x73, 0x1a, 0x74, 0x0a, 0x0e, + 0x48, 0x61, 0x6e, 0x64, 0x6c, 0x65, 0x43, 0x6f, 0x75, 0x6e, 0x74, 0x65, 0x72, 0x73, 0x12, 0x14, + 0x0a, 0x05, 0x74, 0x6f, 0x74, 0x61, 0x6c, 0x18, 0x01, 0x20, 0x01, 0x28, 0x03, 0x52, 0x05, 0x74, + 0x6f, 0x74, 0x61, 0x6c, 0x12, 0x18, 0x0a, 0x07, 0x73, 0x75, 
0x63, 0x63, 0x65, 0x73, 0x73, 0x18, + 0x02, 0x20, 0x01, 0x28, 0x03, 0x52, 0x07, 0x73, 0x75, 0x63, 0x63, 0x65, 0x73, 0x73, 0x12, 0x18, + 0x0a, 0x07, 0x66, 0x61, 0x69, 0x6c, 0x75, 0x72, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x03, 0x52, + 0x07, 0x66, 0x61, 0x69, 0x6c, 0x75, 0x72, 0x65, 0x12, 0x18, 0x0a, 0x07, 0x74, 0x69, 0x6d, 0x65, + 0x6f, 0x75, 0x74, 0x18, 0x04, 0x20, 0x01, 0x28, 0x03, 0x52, 0x07, 0x74, 0x69, 0x6d, 0x65, 0x6f, + 0x75, 0x74, 0x1a, 0x9d, 0x01, 0x0a, 0x08, 0x42, 0x61, 0x6e, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, + 0x0e, 0x0a, 0x02, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x02, 0x69, 0x64, 0x12, + 0x0e, 0x0a, 0x02, 0x69, 0x70, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x02, 0x69, 0x70, 0x12, + 0x12, 0x0a, 0x04, 0x70, 0x6f, 0x72, 0x74, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x04, 0x70, + 0x6f, 0x72, 0x74, 0x12, 0x14, 0x0a, 0x05, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x18, 0x04, 0x20, 0x01, + 0x28, 0x05, 0x52, 0x05, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x12, 0x26, 0x0a, 0x0f, 0x63, 0x72, 0x65, + 0x61, 0x74, 0x65, 0x64, 0x5f, 0x61, 0x74, 0x5f, 0x75, 0x6e, 0x69, 0x78, 0x18, 0x05, 0x20, 0x01, + 0x28, 0x03, 0x52, 0x0d, 0x63, 0x72, 0x65, 0x61, 0x74, 0x65, 0x64, 0x41, 0x74, 0x55, 0x6e, 0x69, + 0x78, 0x12, 0x1f, 0x0a, 0x0b, 0x61, 0x67, 0x65, 0x5f, 0x73, 0x65, 0x63, 0x6f, 0x6e, 0x64, 0x73, + 0x18, 0x06, 0x20, 0x01, 0x28, 0x03, 0x52, 0x0a, 0x61, 0x67, 0x65, 0x53, 0x65, 0x63, 0x6f, 0x6e, + 0x64, 0x73, 0x1a, 0x65, 0x0a, 0x0d, 0x44, 0x61, 0x74, 0x61, 0x62, 0x61, 0x73, 0x65, 0x53, 0x74, + 0x61, 0x74, 0x73, 0x12, 0x23, 0x0a, 0x0e, 0x70, 0x32, 0x70, 0x5f, 0x64, 0x62, 0x5f, 0x73, 0x69, + 0x7a, 0x65, 0x5f, 0x6d, 0x62, 0x18, 0x01, 0x20, 0x01, 0x28, 0x01, 0x52, 0x0b, 0x70, 0x32, 0x70, + 0x44, 0x62, 0x53, 0x69, 0x7a, 0x65, 0x4d, 0x62, 0x12, 0x2f, 0x0a, 0x14, 0x70, 0x32, 0x70, 0x5f, + 0x64, 0x62, 0x5f, 0x72, 0x65, 0x63, 0x6f, 0x72, 0x64, 0x73, 0x5f, 0x63, 0x6f, 0x75, 0x6e, 0x74, + 0x18, 0x02, 0x20, 0x01, 0x28, 0x03, 0x52, 0x11, 0x70, 0x32, 0x70, 0x44, 0x62, 0x52, 0x65, 0x63, + 
0x6f, 0x72, 0x64, 0x73, 0x43, 0x6f, 0x75, 0x6e, 0x74, 0x1a, 0x55, 0x0a, 0x0a, 0x44, 0x69, 0x73, + 0x6b, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x12, 0x15, 0x0a, 0x06, 0x61, 0x6c, 0x6c, 0x5f, 0x6d, + 0x62, 0x18, 0x01, 0x20, 0x01, 0x28, 0x01, 0x52, 0x05, 0x61, 0x6c, 0x6c, 0x4d, 0x62, 0x12, 0x17, + 0x0a, 0x07, 0x75, 0x73, 0x65, 0x64, 0x5f, 0x6d, 0x62, 0x18, 0x02, 0x20, 0x01, 0x28, 0x01, 0x52, + 0x06, 0x75, 0x73, 0x65, 0x64, 0x4d, 0x62, 0x12, 0x17, 0x0a, 0x07, 0x66, 0x72, 0x65, 0x65, 0x5f, + 0x6d, 0x62, 0x18, 0x03, 0x20, 0x01, 0x28, 0x01, 0x52, 0x06, 0x66, 0x72, 0x65, 0x65, 0x4d, 0x62, + 0x1a, 0x7c, 0x0a, 0x19, 0x4e, 0x65, 0x74, 0x77, 0x6f, 0x72, 0x6b, 0x48, 0x61, 0x6e, 0x64, 0x6c, + 0x65, 0x4d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, + 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, + 0x49, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x33, + 0x2e, 0x73, 0x75, 0x70, 0x65, 0x72, 0x6e, 0x6f, 0x64, 0x65, 0x2e, 0x53, 0x74, 0x61, 0x74, 0x75, + 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x2e, 0x50, 0x32, 0x50, 0x4d, 0x65, 0x74, + 0x72, 0x69, 0x63, 0x73, 0x2e, 0x48, 0x61, 0x6e, 0x64, 0x6c, 0x65, 0x43, 0x6f, 0x75, 0x6e, 0x74, + 0x65, 0x72, 0x73, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, 0x1a, 0x42, + 0x0a, 0x14, 0x43, 0x6f, 0x6e, 0x6e, 0x50, 0x6f, 0x6f, 0x6c, 0x4d, 0x65, 0x74, 0x72, 0x69, 0x63, + 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, + 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, + 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x03, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, + 0x38, 0x01, 0x32, 0xd7, 0x01, 0x0a, 0x10, 0x53, 0x75, 0x70, 0x65, 0x72, 0x6e, 0x6f, 0x64, 0x65, + 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x12, 0x58, 0x0a, 0x09, 0x47, 0x65, 0x74, 0x53, 0x74, + 0x61, 0x74, 0x75, 0x73, 0x12, 0x18, 
0x2e, 0x73, 0x75, 0x70, 0x65, 0x72, 0x6e, 0x6f, 0x64, 0x65, + 0x2e, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x19, + 0x2e, 0x73, 0x75, 0x70, 0x65, 0x72, 0x6e, 0x6f, 0x64, 0x65, 0x2e, 0x53, 0x74, 0x61, 0x74, 0x75, + 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x16, 0x82, 0xd3, 0xe4, 0x93, 0x02, + 0x10, 0x12, 0x0e, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x76, 0x31, 0x2f, 0x73, 0x74, 0x61, 0x74, 0x75, + 0x73, 0x12, 0x69, 0x0a, 0x0c, 0x4c, 0x69, 0x73, 0x74, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, + 0x73, 0x12, 0x1e, 0x2e, 0x73, 0x75, 0x70, 0x65, 0x72, 0x6e, 0x6f, 0x64, 0x65, 0x2e, 0x4c, 0x69, + 0x73, 0x74, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, + 0x74, 0x1a, 0x1f, 0x2e, 0x73, 0x75, 0x70, 0x65, 0x72, 0x6e, 0x6f, 0x64, 0x65, 0x2e, 0x4c, 0x69, + 0x73, 0x74, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, + 0x73, 0x65, 0x22, 0x18, 0x82, 0xd3, 0xe4, 0x93, 0x02, 0x12, 0x12, 0x10, 0x2f, 0x61, 0x70, 0x69, + 0x2f, 0x76, 0x31, 0x2f, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x73, 0x42, 0x36, 0x5a, 0x34, + 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x4c, 0x75, 0x6d, 0x65, 0x72, + 0x61, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x6f, 0x6c, 0x2f, 0x73, 0x75, 0x70, 0x65, 0x72, 0x6e, + 0x6f, 0x64, 0x65, 0x2f, 0x76, 0x32, 0x2f, 0x67, 0x65, 0x6e, 0x2f, 0x73, 0x75, 0x70, 0x65, 0x72, + 0x6e, 0x6f, 0x64, 0x65, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, } var ( @@ -814,37 +1543,57 @@ func file_supernode_supernode_proto_rawDescGZIP() []byte { return file_supernode_supernode_proto_rawDescData } -var file_supernode_supernode_proto_msgTypes = make([]protoimpl.MessageInfo, 11) +var file_supernode_supernode_proto_msgTypes = make([]protoimpl.MessageInfo, 21) var file_supernode_supernode_proto_goTypes = []any{ - (*StatusRequest)(nil), // 0: supernode.StatusRequest - (*ListServicesRequest)(nil), // 1: supernode.ListServicesRequest - 
(*ListServicesResponse)(nil), // 2: supernode.ListServicesResponse - (*ServiceInfo)(nil), // 3: supernode.ServiceInfo - (*StatusResponse)(nil), // 4: supernode.StatusResponse - (*StatusResponse_Resources)(nil), // 5: supernode.StatusResponse.Resources - (*StatusResponse_ServiceTasks)(nil), // 6: supernode.StatusResponse.ServiceTasks - (*StatusResponse_Network)(nil), // 7: supernode.StatusResponse.Network - (*StatusResponse_Resources_CPU)(nil), // 8: supernode.StatusResponse.Resources.CPU - (*StatusResponse_Resources_Memory)(nil), // 9: supernode.StatusResponse.Resources.Memory - (*StatusResponse_Resources_Storage)(nil), // 10: supernode.StatusResponse.Resources.Storage + (*StatusRequest)(nil), // 0: supernode.StatusRequest + (*ListServicesRequest)(nil), // 1: supernode.ListServicesRequest + (*ListServicesResponse)(nil), // 2: supernode.ListServicesResponse + (*ServiceInfo)(nil), // 3: supernode.ServiceInfo + (*StatusResponse)(nil), // 4: supernode.StatusResponse + (*StatusResponse_Resources)(nil), // 5: supernode.StatusResponse.Resources + (*StatusResponse_ServiceTasks)(nil), // 6: supernode.StatusResponse.ServiceTasks + (*StatusResponse_Network)(nil), // 7: supernode.StatusResponse.Network + (*StatusResponse_P2PMetrics)(nil), // 8: supernode.StatusResponse.P2PMetrics + (*StatusResponse_Resources_CPU)(nil), // 9: supernode.StatusResponse.Resources.CPU + (*StatusResponse_Resources_Memory)(nil), // 10: supernode.StatusResponse.Resources.Memory + (*StatusResponse_Resources_Storage)(nil), // 11: supernode.StatusResponse.Resources.Storage + (*StatusResponse_P2PMetrics_DhtMetrics)(nil), // 12: supernode.StatusResponse.P2PMetrics.DhtMetrics + (*StatusResponse_P2PMetrics_HandleCounters)(nil), // 13: supernode.StatusResponse.P2PMetrics.HandleCounters + (*StatusResponse_P2PMetrics_BanEntry)(nil), // 14: supernode.StatusResponse.P2PMetrics.BanEntry + (*StatusResponse_P2PMetrics_DatabaseStats)(nil), // 15: supernode.StatusResponse.P2PMetrics.DatabaseStats + 
(*StatusResponse_P2PMetrics_DiskStatus)(nil), // 16: supernode.StatusResponse.P2PMetrics.DiskStatus + nil, // 17: supernode.StatusResponse.P2PMetrics.NetworkHandleMetricsEntry + nil, // 18: supernode.StatusResponse.P2PMetrics.ConnPoolMetricsEntry + (*StatusResponse_P2PMetrics_DhtMetrics_StoreSuccessPoint)(nil), // 19: supernode.StatusResponse.P2PMetrics.DhtMetrics.StoreSuccessPoint + (*StatusResponse_P2PMetrics_DhtMetrics_BatchRetrievePoint)(nil), // 20: supernode.StatusResponse.P2PMetrics.DhtMetrics.BatchRetrievePoint } var file_supernode_supernode_proto_depIdxs = []int32{ 3, // 0: supernode.ListServicesResponse.services:type_name -> supernode.ServiceInfo 5, // 1: supernode.StatusResponse.resources:type_name -> supernode.StatusResponse.Resources 6, // 2: supernode.StatusResponse.running_tasks:type_name -> supernode.StatusResponse.ServiceTasks 7, // 3: supernode.StatusResponse.network:type_name -> supernode.StatusResponse.Network - 8, // 4: supernode.StatusResponse.Resources.cpu:type_name -> supernode.StatusResponse.Resources.CPU - 9, // 5: supernode.StatusResponse.Resources.memory:type_name -> supernode.StatusResponse.Resources.Memory - 10, // 6: supernode.StatusResponse.Resources.storage_volumes:type_name -> supernode.StatusResponse.Resources.Storage - 0, // 7: supernode.SupernodeService.GetStatus:input_type -> supernode.StatusRequest - 1, // 8: supernode.SupernodeService.ListServices:input_type -> supernode.ListServicesRequest - 4, // 9: supernode.SupernodeService.GetStatus:output_type -> supernode.StatusResponse - 2, // 10: supernode.SupernodeService.ListServices:output_type -> supernode.ListServicesResponse - 9, // [9:11] is the sub-list for method output_type - 7, // [7:9] is the sub-list for method input_type - 7, // [7:7] is the sub-list for extension type_name - 7, // [7:7] is the sub-list for extension extendee - 0, // [0:7] is the sub-list for field type_name + 8, // 4: supernode.StatusResponse.p2p_metrics:type_name -> supernode.StatusResponse.P2PMetrics 
+ 9, // 5: supernode.StatusResponse.Resources.cpu:type_name -> supernode.StatusResponse.Resources.CPU + 10, // 6: supernode.StatusResponse.Resources.memory:type_name -> supernode.StatusResponse.Resources.Memory + 11, // 7: supernode.StatusResponse.Resources.storage_volumes:type_name -> supernode.StatusResponse.Resources.Storage + 12, // 8: supernode.StatusResponse.P2PMetrics.dht_metrics:type_name -> supernode.StatusResponse.P2PMetrics.DhtMetrics + 17, // 9: supernode.StatusResponse.P2PMetrics.network_handle_metrics:type_name -> supernode.StatusResponse.P2PMetrics.NetworkHandleMetricsEntry + 18, // 10: supernode.StatusResponse.P2PMetrics.conn_pool_metrics:type_name -> supernode.StatusResponse.P2PMetrics.ConnPoolMetricsEntry + 14, // 11: supernode.StatusResponse.P2PMetrics.ban_list:type_name -> supernode.StatusResponse.P2PMetrics.BanEntry + 15, // 12: supernode.StatusResponse.P2PMetrics.database:type_name -> supernode.StatusResponse.P2PMetrics.DatabaseStats + 16, // 13: supernode.StatusResponse.P2PMetrics.disk:type_name -> supernode.StatusResponse.P2PMetrics.DiskStatus + 19, // 14: supernode.StatusResponse.P2PMetrics.DhtMetrics.store_success_recent:type_name -> supernode.StatusResponse.P2PMetrics.DhtMetrics.StoreSuccessPoint + 20, // 15: supernode.StatusResponse.P2PMetrics.DhtMetrics.batch_retrieve_recent:type_name -> supernode.StatusResponse.P2PMetrics.DhtMetrics.BatchRetrievePoint + 13, // 16: supernode.StatusResponse.P2PMetrics.NetworkHandleMetricsEntry.value:type_name -> supernode.StatusResponse.P2PMetrics.HandleCounters + 0, // 17: supernode.SupernodeService.GetStatus:input_type -> supernode.StatusRequest + 1, // 18: supernode.SupernodeService.ListServices:input_type -> supernode.ListServicesRequest + 4, // 19: supernode.SupernodeService.GetStatus:output_type -> supernode.StatusResponse + 2, // 20: supernode.SupernodeService.ListServices:output_type -> supernode.ListServicesResponse + 19, // [19:21] is the sub-list for method output_type + 17, // [17:19] is the 
sub-list for method input_type + 17, // [17:17] is the sub-list for extension type_name + 17, // [17:17] is the sub-list for extension extendee + 0, // [0:17] is the sub-list for field type_name } func init() { file_supernode_supernode_proto_init() } @@ -858,7 +1607,7 @@ func file_supernode_supernode_proto_init() { GoPackagePath: reflect.TypeOf(x{}).PkgPath(), RawDescriptor: file_supernode_supernode_proto_rawDesc, NumEnums: 0, - NumMessages: 11, + NumMessages: 21, NumExtensions: 0, NumServices: 1, }, diff --git a/gen/supernode/supernode.pb.gw.go b/gen/supernode/supernode.pb.gw.go index 65c4649a..0976b8b7 100644 --- a/gen/supernode/supernode.pb.gw.go +++ b/gen/supernode/supernode.pb.gw.go @@ -33,10 +33,21 @@ var _ = utilities.NewDoubleArray var _ = descriptor.ForMessage var _ = metadata.Join +var ( + filter_SupernodeService_GetStatus_0 = &utilities.DoubleArray{Encoding: map[string]int{}, Base: []int(nil), Check: []int(nil)} +) + func request_SupernodeService_GetStatus_0(ctx context.Context, marshaler runtime.Marshaler, client SupernodeServiceClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { var protoReq StatusRequest var metadata runtime.ServerMetadata + if err := req.ParseForm(); err != nil { + return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) + } + if err := runtime.PopulateQueryParameters(&protoReq, req.Form, filter_SupernodeService_GetStatus_0); err != nil { + return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) + } + msg, err := client.GetStatus(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD)) return msg, metadata, err @@ -46,6 +57,13 @@ func local_request_SupernodeService_GetStatus_0(ctx context.Context, marshaler r var protoReq StatusRequest var metadata runtime.ServerMetadata + if err := req.ParseForm(); err != nil { + return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) + } + if err := 
runtime.PopulateQueryParameters(&protoReq, req.Form, filter_SupernodeService_GetStatus_0); err != nil { + return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) + } + msg, err := server.GetStatus(ctx, &protoReq) return msg, metadata, err diff --git a/gen/supernode/supernode.swagger.json b/gen/supernode/supernode.swagger.json index af023816..e29dcbae 100644 --- a/gen/supernode/supernode.swagger.json +++ b/gen/supernode/supernode.swagger.json @@ -55,6 +55,15 @@ } } }, + "parameters": [ + { + "name": "includeP2pMetrics", + "description": "Optional: include detailed P2P metrics in the response\nMaps to query param via grpc-gateway: /api/v1/status?include_p2p_metrics=true", + "in": "query", + "required": false, + "type": "boolean" + } + ], "tags": [ "SupernodeService" ] @@ -62,6 +71,184 @@ } }, "definitions": { + "DhtMetricsBatchRetrievePoint": { + "type": "object", + "properties": { + "timeUnix": { + "type": "string", + "format": "int64", + "title": "event time (unix seconds)" + }, + "keys": { + "type": "integer", + "format": "int32", + "title": "keys requested" + }, + "required": { + "type": "integer", + "format": "int32", + "title": "required count" + }, + "foundLocal": { + "type": "integer", + "format": "int32", + "title": "found locally" + }, + "foundNetwork": { + "type": "integer", + "format": "int32", + "title": "found on network" + }, + "durationMs": { + "type": "string", + "format": "int64", + "title": "duration in milliseconds" + } + } + }, + "DhtMetricsStoreSuccessPoint": { + "type": "object", + "properties": { + "timeUnix": { + "type": "string", + "format": "int64", + "title": "event time (unix seconds)" + }, + "requests": { + "type": "integer", + "format": "int32", + "title": "total node RPCs attempted" + }, + "successful": { + "type": "integer", + "format": "int32", + "title": "successful node RPCs" + }, + "successRate": { + "type": "number", + "format": "double", + "title": "percentage (0-100)" + } + } + }, + "P2PMetricsBanEntry": { + 
"type": "object", + "properties": { + "id": { + "type": "string", + "title": "printable ID" + }, + "ip": { + "type": "string", + "title": "last seen IP" + }, + "port": { + "type": "integer", + "format": "int64", + "title": "last seen port" + }, + "count": { + "type": "integer", + "format": "int32", + "title": "failure count" + }, + "createdAtUnix": { + "type": "string", + "format": "int64", + "title": "first ban time (unix seconds)" + }, + "ageSeconds": { + "type": "string", + "format": "int64", + "title": "age in seconds" + } + }, + "title": "Ban list entry" + }, + "P2PMetricsDatabaseStats": { + "type": "object", + "properties": { + "p2pDbSizeMb": { + "type": "number", + "format": "double" + }, + "p2pDbRecordsCount": { + "type": "string", + "format": "int64" + } + }, + "title": "DB stats" + }, + "P2PMetricsDhtMetrics": { + "type": "object", + "properties": { + "storeSuccessRecent": { + "type": "array", + "items": { + "type": "object", + "$ref": "#/definitions/DhtMetricsStoreSuccessPoint" + } + }, + "batchRetrieveRecent": { + "type": "array", + "items": { + "type": "object", + "$ref": "#/definitions/DhtMetricsBatchRetrievePoint" + } + }, + "hotPathBannedSkips": { + "type": "string", + "format": "int64", + "title": "counter" + }, + "hotPathBanIncrements": { + "type": "string", + "format": "int64", + "title": "counter" + } + }, + "title": "Rolling DHT metrics snapshot" + }, + "P2PMetricsDiskStatus": { + "type": "object", + "properties": { + "allMb": { + "type": "number", + "format": "double" + }, + "usedMb": { + "type": "number", + "format": "double" + }, + "freeMb": { + "type": "number", + "format": "double" + } + }, + "title": "Disk status" + }, + "P2PMetricsHandleCounters": { + "type": "object", + "properties": { + "total": { + "type": "string", + "format": "int64" + }, + "success": { + "type": "string", + "format": "int64" + }, + "failure": { + "type": "string", + "format": "int64" + }, + "timeout": { + "type": "string", + "format": "int64" + } + }, + "title": 
"Per-handler counters from network layer" + }, "ResourcesCPU": { "type": "object", "properties": { @@ -146,6 +333,41 @@ }, "title": "Network information" }, + "StatusResponseP2PMetrics": { + "type": "object", + "properties": { + "dhtMetrics": { + "$ref": "#/definitions/P2PMetricsDhtMetrics" + }, + "networkHandleMetrics": { + "type": "object", + "additionalProperties": { + "$ref": "#/definitions/P2PMetricsHandleCounters" + } + }, + "connPoolMetrics": { + "type": "object", + "additionalProperties": { + "type": "string", + "format": "int64" + } + }, + "banList": { + "type": "array", + "items": { + "type": "object", + "$ref": "#/definitions/P2PMetricsBanEntry" + } + }, + "database": { + "$ref": "#/definitions/P2PMetricsDatabaseStats" + }, + "disk": { + "$ref": "#/definitions/P2PMetricsDiskStatus" + } + }, + "title": "P2P metrics and diagnostics (additive field)" + }, "StatusResponseResources": { "type": "object", "properties": { @@ -288,6 +510,9 @@ "ipAddress": { "type": "string", "title": "Supernode IP address with port (e.g., \"192.168.1.1:4445\")" + }, + "p2pMetrics": { + "$ref": "#/definitions/StatusResponseP2PMetrics" } }, "title": "The StatusResponse represents system status with clear organization" diff --git a/proto/supernode/action/cascade/service.proto b/proto/supernode/action/cascade/service.proto index 166ec0a2..06148539 100644 --- a/proto/supernode/action/cascade/service.proto +++ b/proto/supernode/action/cascade/service.proto @@ -61,4 +61,5 @@ enum SupernodeEventType { ARTEFACTS_STORED = 11; ACTION_FINALIZED = 12; ARTEFACTS_DOWNLOADED = 13; + FINALIZE_SIMULATION_FAILED = 14; } diff --git a/proto/supernode/supernode.proto b/proto/supernode/supernode.proto index 17081a5f..c68e8fce 100644 --- a/proto/supernode/supernode.proto +++ b/proto/supernode/supernode.proto @@ -19,7 +19,11 @@ service SupernodeService { } } -message StatusRequest {} +message StatusRequest { + // Optional: include detailed P2P metrics in the response + // Maps to query param via 
grpc-gateway: /api/v1/status?include_p2p_metrics=true + bool include_p2p_metrics = 1; +} message ListServicesRequest {} @@ -85,4 +89,72 @@ message StatusResponse { Network network = 6; // P2P network information int32 rank = 7; // Rank in the top supernodes list (0 if not in top list) string ip_address = 8; // Supernode IP address with port (e.g., "192.168.1.1:4445") -} \ No newline at end of file + + // P2P metrics and diagnostics (additive field) + message P2PMetrics { + // Rolling DHT metrics snapshot + message DhtMetrics { + message StoreSuccessPoint { + int64 time_unix = 1; // event time (unix seconds) + int32 requests = 2; // total node RPCs attempted + int32 successful = 3; // successful node RPCs + double success_rate = 4; // percentage (0-100) + } + + message BatchRetrievePoint { + int64 time_unix = 1; // event time (unix seconds) + int32 keys = 2; // keys requested + int32 required = 3; // required count + int32 found_local = 4; // found locally + int32 found_network = 5; // found on network + int64 duration_ms = 6; // duration in milliseconds + } + + repeated StoreSuccessPoint store_success_recent = 1; + repeated BatchRetrievePoint batch_retrieve_recent = 2; + + int64 hot_path_banned_skips = 3; // counter + int64 hot_path_ban_increments = 4; // counter + } + + // Per-handler counters from network layer + message HandleCounters { + int64 total = 1; + int64 success = 2; + int64 failure = 3; + int64 timeout = 4; + } + + // Ban list entry + message BanEntry { + string id = 1; // printable ID + string ip = 2; // last seen IP + uint32 port = 3; // last seen port + int32 count = 4; // failure count + int64 created_at_unix = 5; // first ban time (unix seconds) + int64 age_seconds = 6; // age in seconds + } + + // DB stats + message DatabaseStats { + double p2p_db_size_mb = 1; + int64 p2p_db_records_count = 2; + } + + // Disk status + message DiskStatus { + double all_mb = 1; + double used_mb = 2; + double free_mb = 3; + } + + DhtMetrics dht_metrics = 1; + map 
network_handle_metrics = 2; + map conn_pool_metrics = 3; + repeated BanEntry ban_list = 4; + DatabaseStats database = 5; + DiskStatus disk = 6; + } + + P2PMetrics p2p_metrics = 9; +} diff --git a/sdk/README.md b/sdk/README.md index a026d162..b0aecb20 100644 --- a/sdk/README.md +++ b/sdk/README.md @@ -16,6 +16,7 @@ The Lumera Supernode SDK is a comprehensive toolkit for interacting with the Lu - [SubscribeToAllEvents](#subscribetoallevents) - [Event System](#event-system) - [Error Handling](#error-handling) +- [Timeouts & Networking](#timeouts--networking) ## Configuration @@ -305,6 +306,25 @@ if err != nil { - `*supernodeservice.SupernodeStatusresponse`: Status information including CPU usage, memory stats, and active services - `error`: Error if the supernode is unreachable or query fails +Include detailed P2P metrics (optional): + +By default, peer info and P2P metrics are not returned to keep calls lightweight. To include them, set an option in the context: + +```go +import snsvc "github.com/LumeraProtocol/supernode/v2/sdk/adapters/supernodeservice" + +// Opt-in via context +ctxWithMetrics := snsvc.WithIncludeP2PMetrics(ctx) +status, err := client.GetSupernodeStatus(ctxWithMetrics, "lumera1abc...") +if err != nil { + // handle error +} + +// Access optional fields when present +fmt.Println("Peers:", status.Network.PeersCount) +fmt.Println("DHT hot path bans:", status.P2PMetrics.DhtMetrics.HotPathBanIncrements) +``` + ### SubscribeToEvents Registers an event handler for specific event types to monitor task progress. 
@@ -364,6 +384,13 @@ The SDK provides an event system to monitor task progress through event subscrip - `SDKTaskTxHashReceived`: Transaction hash received from supernode - `SDKTaskCompleted`: Task completed successfully - `SDKTaskFailed`: Task failed with error + - `SDKConnectionEstablished`: Connection to supernode established + - `SDKUploadStarted`: Upload started (size, chunk size, est chunks) + - `SDKUploadCompleted`: Upload completed (size, chunks, elapsed, avg throughput) + - `SDKUploadFailed`: Upload failed (reason=timeout|send_error|read_error|file_open|file_stat|close_send) + - `SDKProcessingStarted`: Waiting for server progress/final tx hash + - `SDKProcessingFailed`: Processing failed (reason=stream_recv|missing_final_response) + - `SDKProcessingTimeout`: Processing exceeded time budget and was cancelled - `SDKDownloadAttempt`: Attempting to download from supernode - `SDKDownloadFailure`: Download attempt failed - `SDKOutputPathReceived`: File download path received @@ -381,9 +408,10 @@ The SDK provides an event system to monitor task progress through event subscrip - `SupernodeRQIDVerified`: RaptorQ ID verified - `SupernodeFinalizeSimulated`: Finalize transaction simulated successfully (pre-storage) - `SupernodeArtefactsStored`: Artifacts stored successfully -- `SupernodeActionFinalized`: Action processing finalized -- `SupernodeArtefactsDownloaded`: Artifacts downloaded -- `SupernodeUnknown`: Unknown supernode event + - `SupernodeActionFinalized`: Action processing finalized + - `SupernodeArtefactsDownloaded`: Artifacts downloaded + - `SupernodeFinalizeSimulationFailed`: Finalize action simulation failed + - `SupernodeUnknown`: Unknown supernode event Note: For backward compatibility, older supernodes may emit the finalize simulation as an `RQID_VERIFIED` event with the message `"finalize action simulation passed"`. The SDK adapter maps this to `SupernodeFinalizeSimulated` automatically. 
@@ -418,3 +446,8 @@ err := client.SubscribeToEvents(ctx, event.SDKTaskCompleted, func(ctx context.Co err := client.SubscribeToAllEvents(ctx, func(ctx context.Context, e event.Event) { fmt.Printf("Event: %s for task %s\n", e.Type, e.TaskID) }) +## Timeouts & Networking + +For an in-depth explanation of how contexts, deadlines, client/server options, and cascade registration timeouts are applied see: + +- `supernode/sdk/docs/cascade-timeouts.md` diff --git a/sdk/adapters/supernodeservice/adapter.go b/sdk/adapters/supernodeservice/adapter.go index c6475326..b0c43c86 100644 --- a/sdk/adapters/supernodeservice/adapter.go +++ b/sdk/adapters/supernodeservice/adapter.go @@ -8,10 +8,10 @@ import ( "path/filepath" "regexp" "strconv" + "time" "github.com/LumeraProtocol/supernode/v2/gen/supernode" "github.com/LumeraProtocol/supernode/v2/gen/supernode/action/cascade" - "github.com/LumeraProtocol/supernode/v2/pkg/net" "github.com/LumeraProtocol/supernode/v2/sdk/event" "github.com/LumeraProtocol/supernode/v2/sdk/log" @@ -80,13 +80,22 @@ func calculateOptimalChunkSize(fileSize int64) int { const maxFileSize = 1 * 1024 * 1024 * 1024 // 1GB limit func (a *cascadeAdapter) CascadeSupernodeRegister(ctx context.Context, in *CascadeSupernodeRegisterRequest, opts ...grpc.CallOption) (*CascadeSupernodeRegisterResponse, error) { - // Create the client stream - ctx = net.AddCorrelationID(ctx) + // Create a cancelable context for phased timers (no correlation IDs) + baseCtx := ctx + phaseCtx, cancel := context.WithCancel(baseCtx) + defer cancel() - stream, err := a.client.Register(ctx, opts...) + // Create the client stream + stream, err := a.client.Register(phaseCtx, opts...) 
if err != nil { a.logger.Error(ctx, "Failed to create register stream", "error", err) + if in.EventLogger != nil { + in.EventLogger(baseCtx, event.SDKUploadFailed, "upload failed | reason=stream_open", event.EventData{ + event.KeyTaskID: in.TaskId, + event.KeyActionID: in.ActionID, + }) + } return nil, err } @@ -94,6 +103,12 @@ func (a *cascadeAdapter) CascadeSupernodeRegister(ctx context.Context, in *Casca file, err := os.Open(in.FilePath) if err != nil { a.logger.Error(ctx, "Failed to open file", "filePath", in.FilePath, "error", err) + if in.EventLogger != nil { + in.EventLogger(baseCtx, event.SDKUploadFailed, "upload failed | reason=file_open", event.EventData{ + event.KeyTaskID: in.TaskId, + event.KeyActionID: in.ActionID, + }) + } return nil, fmt.Errorf("failed to open file: %w", err) } defer file.Close() @@ -102,6 +117,12 @@ func (a *cascadeAdapter) CascadeSupernodeRegister(ctx context.Context, in *Casca fileInfo, err := file.Stat() if err != nil { a.logger.Error(ctx, "Failed to get file stats", "filePath", in.FilePath, "error", err) + if in.EventLogger != nil { + in.EventLogger(baseCtx, event.SDKUploadFailed, "upload failed | reason=file_stat", event.EventData{ + event.KeyTaskID: in.TaskId, + event.KeyActionID: in.ActionID, + }) + } return nil, fmt.Errorf("failed to get file stats: %w", err) } totalBytes := fileInfo.Size() @@ -125,7 +146,26 @@ func (a *cascadeAdapter) CascadeSupernodeRegister(ctx context.Context, in *Casca chunkIndex := 0 buffer := make([]byte, chunkSize) - // Read and send data in chunks + // Emit upload started event + if in.EventLogger != nil { + estChunks := (totalBytes + int64(chunkSize) - 1) / int64(chunkSize) + in.EventLogger(baseCtx, event.SDKUploadStarted, + fmt.Sprintf("upload started | size=%dB chunk_size=%dB est_chunks=%d", totalBytes, chunkSize, estChunks), + event.EventData{event.KeyTaskID: in.TaskId, event.KeyActionID: in.ActionID}) + } + + uploadStart := time.Now() + + // Start upload phase timer + uploadTimer := 
time.AfterFunc(cascadeUploadTimeout, func() { + a.logger.Error(baseCtx, "Upload phase timeout reached; cancelling stream") + if in.EventLogger != nil { + in.EventLogger(baseCtx, event.SDKUploadFailed, "upload failed | reason=timeout", event.EventData{event.KeyTaskID: in.TaskId, event.KeyActionID: in.ActionID}) + } + cancel() + }) + + // Read and send data in chunks (upload phase) for { // Read a chunk from the file n, err := file.Read(buffer) @@ -134,6 +174,12 @@ func (a *cascadeAdapter) CascadeSupernodeRegister(ctx context.Context, in *Casca } if err != nil { a.logger.Error(ctx, "Failed to read file chunk", "chunkIndex", chunkIndex, "error", err) + if in.EventLogger != nil { + in.EventLogger(baseCtx, event.SDKUploadFailed, fmt.Sprintf("upload failed | reason=read_error chunk=%d", chunkIndex), event.EventData{ + event.KeyTaskID: in.TaskId, + event.KeyActionID: in.ActionID, + }) + } return nil, fmt.Errorf("failed to read file chunk: %w", err) } @@ -148,6 +194,12 @@ func (a *cascadeAdapter) CascadeSupernodeRegister(ctx context.Context, in *Casca if err := stream.Send(chunk); err != nil { a.logger.Error(ctx, "Failed to send data chunk", "chunkIndex", chunkIndex, "error", err) + if in.EventLogger != nil { + in.EventLogger(baseCtx, event.SDKUploadFailed, fmt.Sprintf("upload failed | reason=send_error chunk=%d", chunkIndex), event.EventData{ + event.KeyTaskID: in.TaskId, + event.KeyActionID: in.ActionID, + }) + } return nil, fmt.Errorf("failed to send chunk: %w", err) } @@ -171,6 +223,12 @@ func (a *cascadeAdapter) CascadeSupernodeRegister(ctx context.Context, in *Casca if err := stream.Send(metadata); err != nil { a.logger.Error(ctx, "Failed to send metadata", "TaskId", in.TaskId, "ActionID", in.ActionID, "error", err) + if in.EventLogger != nil { + in.EventLogger(baseCtx, event.SDKUploadFailed, "upload failed | reason=send_metadata", event.EventData{ + event.KeyTaskID: in.TaskId, + event.KeyActionID: in.ActionID, + }) + } return nil, fmt.Errorf("failed to send 
metadata: %w", err) } @@ -178,9 +236,52 @@ func (a *cascadeAdapter) CascadeSupernodeRegister(ctx context.Context, in *Casca if err := stream.CloseSend(); err != nil { a.logger.Error(ctx, "Failed to close stream and receive response", "TaskId", in.TaskId, "ActionID", in.ActionID, "error", err) + if in.EventLogger != nil { + in.EventLogger(baseCtx, event.SDKUploadFailed, "upload failed | reason=close_send", event.EventData{ + event.KeyTaskID: in.TaskId, + event.KeyActionID: in.ActionID, + }) + } return nil, fmt.Errorf("failed to receive response: %w", err) } + // Upload phase completed; stop its timer + if uploadTimer != nil { + uploadTimer.Stop() + } + + // Emit upload completed with throughput metrics + if in.EventLogger != nil { + elapsed := time.Since(uploadStart).Seconds() + mb := float64(bytesRead) / (1024.0 * 1024.0) + avg := 0.0 + if elapsed > 0 { + avg = mb / elapsed + } + in.EventLogger(baseCtx, event.SDKUploadCompleted, + fmt.Sprintf("upload complete | size=%dB chunks=%d elapsed=%.2fs avg=%.2fMB/s", totalBytes, chunkIndex, elapsed, avg), + event.EventData{event.KeyTaskID: in.TaskId, event.KeyActionID: in.ActionID}) + } + + // Processing phase timer starts now (waiting for server streamed responses) + processingTimer := time.AfterFunc(cascadeProcessingTimeout, func() { + a.logger.Error(baseCtx, "Processing phase timeout reached; cancelling stream") + if in.EventLogger != nil { + in.EventLogger(baseCtx, event.SDKProcessingTimeout, "processing timeout", event.EventData{event.KeyTaskID: in.TaskId, event.KeyActionID: in.ActionID}) + } + cancel() + }) + defer func() { + if processingTimer != nil { + processingTimer.Stop() + } + }() + + // Emit processing started + if in.EventLogger != nil { + in.EventLogger(baseCtx, event.SDKProcessingStarted, "processing started | awaiting server progress", event.EventData{event.KeyTaskID: in.TaskId, event.KeyActionID: in.ActionID}) + } + // Handle streaming responses from supernode var finalResp *cascade.RegisterResponse for { 
@@ -189,6 +290,18 @@ func (a *cascadeAdapter) CascadeSupernodeRegister(ctx context.Context, in *Casca break } if err != nil { + // Distinguish timeout phase for clearer error messages + if phaseCtx.Err() != nil { + // At this point, upload is finished; classify as processing timeout/cancel + if phaseCtx.Err() == context.DeadlineExceeded || phaseCtx.Err() == context.Canceled { + return nil, fmt.Errorf("processing timed out or cancelled: %w", phaseCtx.Err()) + } + } + if in.EventLogger != nil { + in.EventLogger(baseCtx, event.SDKProcessingFailed, + fmt.Sprintf("processing failed | reason=stream_recv error=%v", err), + event.EventData{event.KeyTaskID: in.TaskId, event.KeyActionID: in.ActionID}) + } return nil, fmt.Errorf("failed to receive server response: %w", err) } @@ -219,6 +332,13 @@ func (a *cascadeAdapter) CascadeSupernodeRegister(ctx context.Context, in *Casca } if finalResp == nil { + // If context was cancelled due to timer, surface a more specific error + if phaseCtx.Err() != nil { + return nil, fmt.Errorf("processing timed out or cancelled before final response: %w", phaseCtx.Err()) + } + if in.EventLogger != nil { + in.EventLogger(baseCtx, event.SDKProcessingFailed, "processing failed | reason=missing_final_response", event.EventData{event.KeyTaskID: in.TaskId, event.KeyActionID: in.ActionID}) + } return nil, fmt.Errorf("no final response with tx_hash received") } @@ -230,7 +350,9 @@ func (a *cascadeAdapter) CascadeSupernodeRegister(ctx context.Context, in *Casca } func (a *cascadeAdapter) GetSupernodeStatus(ctx context.Context) (SupernodeStatusresponse, error) { - resp, err := a.statusClient.GetStatus(ctx, &supernode.StatusRequest{}) + // Gate P2P metrics via context option to keep API backward compatible + req := &supernode.StatusRequest{IncludeP2PMetrics: includeP2PMetrics(ctx)} + resp, err := a.statusClient.GetStatus(ctx, req) if err != nil { a.logger.Error(ctx, "Failed to get supernode status", "error", err) return SupernodeStatusresponse{}, 
fmt.Errorf("failed to get supernode status: %w", err) @@ -248,7 +370,7 @@ func (a *cascadeAdapter) CascadeSupernodeDownload( opts ...grpc.CallOption, ) (*CascadeSupernodeDownloadResponse, error) { - ctx = net.AddCorrelationID(ctx) + // Use provided context as-is (no correlation IDs) // 1. Open gRPC stream (server-stream) stream, err := a.client.Download(ctx, &cascade.DownloadRequest{ @@ -331,7 +453,7 @@ func (a *cascadeAdapter) CascadeSupernodeDownload( // toSdkEvent converts a supernode-side enum value into an internal SDK EventType. func toSdkEvent(e cascade.SupernodeEventType) event.EventType { - switch e { + switch e { case cascade.SupernodeEventType_ACTION_RETRIEVED: return event.SupernodeActionRetrieved case cascade.SupernodeEventType_ACTION_FEE_VERIFIED: @@ -356,8 +478,10 @@ func toSdkEvent(e cascade.SupernodeEventType) event.EventType { return event.SupernodeActionFinalized case cascade.SupernodeEventType_ARTEFACTS_DOWNLOADED: return event.SupernodeArtefactsDownloaded - case cascade.SupernodeEventType_FINALIZE_SIMULATED: - return event.SupernodeFinalizeSimulated + case cascade.SupernodeEventType_FINALIZE_SIMULATED: + return event.SupernodeFinalizeSimulated + case cascade.SupernodeEventType_FINALIZE_SIMULATION_FAILED: + return event.SupernodeFinalizeSimulationFailed default: return event.SupernodeUnknown } @@ -387,9 +511,9 @@ func parseSuccessRate(msg string) (float64, bool) { } func toSdkSupernodeStatus(resp *supernode.StatusResponse) *SupernodeStatusresponse { - result := &SupernodeStatusresponse{} - result.Version = resp.Version - result.UptimeSeconds = resp.UptimeSeconds + result := &SupernodeStatusresponse{} + result.Version = resp.Version + result.UptimeSeconds = resp.UptimeSeconds // Convert Resources data if resp.Resources != nil { @@ -444,9 +568,117 @@ func toSdkSupernodeStatus(resp *supernode.StatusResponse) *SupernodeStatusrespon copy(result.Network.PeerAddresses, resp.Network.PeerAddresses) } - // Copy rank and IP address - result.Rank = 
resp.Rank - result.IPAddress = resp.IpAddress - - return result + // Copy rank and IP address + result.Rank = resp.Rank + result.IPAddress = resp.IpAddress + + // Map optional P2P metrics + if resp.P2PMetrics != nil { + // DHT metrics + if resp.P2PMetrics.DhtMetrics != nil { + // Store success recent + for _, p := range resp.P2PMetrics.DhtMetrics.StoreSuccessRecent { + result.P2PMetrics.DhtMetrics.StoreSuccessRecent = append(result.P2PMetrics.DhtMetrics.StoreSuccessRecent, struct { + TimeUnix int64 + Requests int32 + Successful int32 + SuccessRate float64 + }{ + TimeUnix: p.TimeUnix, + Requests: p.Requests, + Successful: p.Successful, + SuccessRate: p.SuccessRate, + }) + } + // Batch retrieve recent + for _, p := range resp.P2PMetrics.DhtMetrics.BatchRetrieveRecent { + result.P2PMetrics.DhtMetrics.BatchRetrieveRecent = append(result.P2PMetrics.DhtMetrics.BatchRetrieveRecent, struct { + TimeUnix int64 + Keys int32 + Required int32 + FoundLocal int32 + FoundNetwork int32 + DurationMS int64 + }{ + TimeUnix: p.TimeUnix, + Keys: p.Keys, + Required: p.Required, + FoundLocal: p.FoundLocal, + FoundNetwork: p.FoundNetwork, + DurationMS: p.DurationMs, + }) + } + result.P2PMetrics.DhtMetrics.HotPathBannedSkips = resp.P2PMetrics.DhtMetrics.HotPathBannedSkips + result.P2PMetrics.DhtMetrics.HotPathBanIncrements = resp.P2PMetrics.DhtMetrics.HotPathBanIncrements + } + + // Network handle metrics + if resp.P2PMetrics.NetworkHandleMetrics != nil { + if result.P2PMetrics.NetworkHandleMetrics == nil { + result.P2PMetrics.NetworkHandleMetrics = map[string]struct{ + Total int64 + Success int64 + Failure int64 + Timeout int64 + }{} + } + for k, v := range resp.P2PMetrics.NetworkHandleMetrics { + result.P2PMetrics.NetworkHandleMetrics[k] = struct{ + Total int64 + Success int64 + Failure int64 + Timeout int64 + }{ + Total: v.Total, + Success: v.Success, + Failure: v.Failure, + Timeout: v.Timeout, + } + } + } + + // Conn pool metrics + if resp.P2PMetrics.ConnPoolMetrics != nil { + if 
result.P2PMetrics.ConnPoolMetrics == nil { + result.P2PMetrics.ConnPoolMetrics = map[string]int64{} + } + for k, v := range resp.P2PMetrics.ConnPoolMetrics { + result.P2PMetrics.ConnPoolMetrics[k] = v + } + } + + // Ban list + for _, b := range resp.P2PMetrics.BanList { + result.P2PMetrics.BanList = append(result.P2PMetrics.BanList, struct { + ID string + IP string + Port uint32 + Count int32 + CreatedAtUnix int64 + AgeSeconds int64 + }{ + ID: b.Id, + IP: b.Ip, + Port: b.Port, + Count: b.Count, + CreatedAtUnix: b.CreatedAtUnix, + AgeSeconds: b.AgeSeconds, + }) + } + + // Database + if resp.P2PMetrics.Database != nil { + result.P2PMetrics.Database.P2PDBSizeMB = resp.P2PMetrics.Database.P2PDbSizeMb + result.P2PMetrics.Database.P2PDBRecordsCount = resp.P2PMetrics.Database.P2PDbRecordsCount + } + + // Disk + if resp.P2PMetrics.Disk != nil { + result.P2PMetrics.Disk.AllMB = resp.P2PMetrics.Disk.AllMb + result.P2PMetrics.Disk.UsedMB = resp.P2PMetrics.Disk.UsedMb + result.P2PMetrics.Disk.FreeMB = resp.P2PMetrics.Disk.FreeMb + } + } + + return result } diff --git a/sdk/adapters/supernodeservice/options.go b/sdk/adapters/supernodeservice/options.go new file mode 100644 index 00000000..547a28c9 --- /dev/null +++ b/sdk/adapters/supernodeservice/options.go @@ -0,0 +1,29 @@ +package supernodeservice + +import "context" + +// internal context key to toggle P2P metrics in status requests +type ctxKey string + +const ctxKeyIncludeP2P ctxKey = "include_p2p_metrics" + +// WithIncludeP2PMetrics returns a child context that requests detailed P2P metrics +// (and peer info) in status responses. +func WithIncludeP2PMetrics(ctx context.Context) context.Context { + return context.WithValue(ctx, ctxKeyIncludeP2P, true) +} + +// WithP2PMetrics allows explicitly setting the include flag. 
+func WithP2PMetrics(ctx context.Context, include bool) context.Context { + return context.WithValue(ctx, ctxKeyIncludeP2P, include) +} + +// includeP2PMetrics reads the flag from context; defaults to false when unset. +func includeP2PMetrics(ctx context.Context) bool { + v := ctx.Value(ctxKeyIncludeP2P) + if b, ok := v.(bool); ok { + return b + } + return false +} + diff --git a/sdk/adapters/supernodeservice/timeouts.go b/sdk/adapters/supernodeservice/timeouts.go new file mode 100644 index 00000000..83311185 --- /dev/null +++ b/sdk/adapters/supernodeservice/timeouts.go @@ -0,0 +1,11 @@ +package supernodeservice + +import "time" + +// cascadeUploadTimeout provides a generous budget for client-side upload over +// potentially slow networks. Adjust as needed; future work may make this configurable. +const cascadeUploadTimeout = 60 * time.Minute + +// cascadeProcessingTimeout bounds the time waiting for server-side processing +// and final response (e.g., tx hash) after upload completes. +const cascadeProcessingTimeout = 10 * time.Minute diff --git a/sdk/adapters/supernodeservice/types.go b/sdk/adapters/supernodeservice/types.go index cd079603..aa76e70d 100644 --- a/sdk/adapters/supernodeservice/types.go +++ b/sdk/adapters/supernodeservice/types.go @@ -45,30 +45,75 @@ type StorageInfo struct { } type SupernodeStatusresponse struct { - Version string // Supernode version - UptimeSeconds uint64 // Uptime in seconds - Resources struct { - CPU struct { - UsagePercent float64 - Cores int32 - } - Memory struct { - TotalGB float64 - UsedGB float64 - AvailableGB float64 - UsagePercent float64 - } - Storage []StorageInfo - HardwareSummary string // Formatted hardware summary - } - RunningTasks []ServiceTasks // Services with running tasks - RegisteredServices []string // All available service names - Network struct { - PeersCount int32 // Number of connected peers - PeerAddresses []string // List of peer addresses - } - Rank int32 // Rank in top supernodes list (0 if not in 
top list) - IPAddress string // Supernode IP address with port + Version string // Supernode version + UptimeSeconds uint64 // Uptime in seconds + Resources struct { + CPU struct { + UsagePercent float64 + Cores int32 + } + Memory struct { + TotalGB float64 + UsedGB float64 + AvailableGB float64 + UsagePercent float64 + } + Storage []StorageInfo + HardwareSummary string // Formatted hardware summary + } + RunningTasks []ServiceTasks // Services with running tasks + RegisteredServices []string // All available service names + Network struct { + PeersCount int32 // Number of connected peers + PeerAddresses []string // List of peer addresses + } + Rank int32 // Rank in top supernodes list (0 if not in top list) + IPAddress string // Supernode IP address with port + // Optional detailed P2P metrics (present when requested) + P2PMetrics struct { + DhtMetrics struct { + StoreSuccessRecent []struct { + TimeUnix int64 + Requests int32 + Successful int32 + SuccessRate float64 + } + BatchRetrieveRecent []struct { + TimeUnix int64 + Keys int32 + Required int32 + FoundLocal int32 + FoundNetwork int32 + DurationMS int64 + } + HotPathBannedSkips int64 + HotPathBanIncrements int64 + } + NetworkHandleMetrics map[string]struct{ + Total int64 + Success int64 + Failure int64 + Timeout int64 + } + ConnPoolMetrics map[string]int64 + BanList []struct { + ID string + IP string + Port uint32 + Count int32 + CreatedAtUnix int64 + AgeSeconds int64 + } + Database struct { + P2PDBSizeMB float64 + P2PDBRecordsCount int64 + } + Disk struct { + AllMB float64 + UsedMB float64 + FreeMB float64 + } + } } type CascadeSupernodeDownloadRequest struct { ActionID string diff --git a/sdk/docs/cascade-timeouts.md b/sdk/docs/cascade-timeouts.md new file mode 100644 index 00000000..716804bc --- /dev/null +++ b/sdk/docs/cascade-timeouts.md @@ -0,0 +1,197 @@ +# Cascade Registration Timeouts and Networking + +This document explains how timeouts and deadlines are applied across the SDK cascade registration 
flow, including the current split between upload and processing phases and the relevant client/server defaults. + +## Purpose + +- Make slow, user‑network–dependent uploads more tolerant without impacting other stages. +- Keep health checks and connection establishment responsive. +- Enable clearer error categorization: upload vs processing. + +## TL;DR Defaults + +- Upload timeout (adapter): `cascadeUploadTimeout = 60m` — covers client-side file streaming to the supernode. +- Processing timeout (adapter): `cascadeProcessingTimeout = 10m` — covers waiting for server progress/final tx hash after upload completes. +- Health check to supernodes (task): `connectionTimeout = 10s` — per-node probe during discovery. +- gRPC connect (client): + - Adds a default `30s` deadline if caller context has none. + - Connection readiness gate: `ConnWaitTime = 10s` per attempt, with `MaxRetries = 3` and retry backoff. +- ALTS handshake (secure transport): `30s` internal read timeouts (client and server sides). +- Supernode gRPC server: + - No per‑RPC timeout for `Register`/`Download` handlers. + - Keepalive is permissive (idle ping at 1h, ping ack timeout 30m). + - Stream tuning: 16MB message caps, 16MB stream window, 160MB conn window, ~20 concurrent streams. + +## Control Flow and Contexts + +1) `sdk/action/client.go: ClientImpl.StartCascade(ctx, ...)` + - Forwards `ctx` to Task Manager. + +2) `sdk/task/manager.go: ManagerImpl.CreateCascadeTask(...)` + - Detaches from caller: `taskCtx := context.WithCancel(context.Background())`. + - All subsequent work uses `taskCtx` (no deadline by default). + +3) `sdk/task/cascade.go: CascadeTask.Run(ctx)` + - Validates file size; fetches healthy supernodes; registers with one. + +4) Discovery: `sdk/task/task.go: BaseTask.fetchSupernodes` → `BaseTask.isServing` + - `context.WithTimeout(parent, 10s)` for health probe (create client + `HealthCheck`). 
+ +5) Registration attempt: `sdk/task/cascade.go: attemptRegistration` + - Client connect: uses task context (no deadline); gRPC injects a 30s default at connect if needed. + - No outer registration timeout here; the adapter handles per‑phase timers. + +6) RPC staging: + - `sdk/net/impl.go: supernodeClient.RegisterCascade` → + - `sdk/adapters/supernodeservice/adapter.go: CascadeSupernodeRegister` performs client‑stream upload and reads server progress / final tx hash. + +## Where Timeouts Come From (by Layer) + +- SDK adapter level (registration RPC): + - `cascadeUploadTimeout` (60m): upload phase timer (file chunks + metadata + CloseSend). + - `cascadeProcessingTimeout` (10m): processing phase timer (receive server progress + final tx hash). +- SDK task level: + - `connectionTimeout` (10s): supernode health checks only. + +- gRPC client (`pkg/net/grpc/client`): + - `defaultTimeout = 30s`: applied to connect if context lacks a deadline. + - `ConnWaitTime = 10s`, `MaxRetries = 3`, backoff configured; keepalives: 30m/30m. + +- ALTS handshake (`pkg/net/credentials/alts/handshake`): + - `defaultTimeout = 30s` for handshake read operations (client/server). + +- gRPC server (`pkg/net/grpc/server` and supernode runtime): + - No explicit per‑RPC timeouts; generous keepalives; tuned flow control and message sizes for 4MB chunks. + +## SDK Constants + +Timeout constants are defined in dedicated files for clarity: + +- Upload/Processing: `supernode/sdk/adapters/supernodeservice/timeouts.go` +- Connection/health probe: `supernode/sdk/task/timeouts.go` + +Notes: +- `BaseTask.isServing` keeps a short 10s budget for snappy health checks. +- gRPC connect/handshake defaults remain unchanged. + +## Implementation Details + +The split is implemented inside `CascadeSupernodeRegister` where the phases are naturally separated by the client‑stream CloseSend. 
+ +1) Create a cancelable context from the inbound one for the stream lifetime: + +```go +phaseCtx, cancel := context.WithCancel(ctx) +defer cancel() +stream, err := a.client.Register(phaseCtx, opts...) +``` + +2) Upload phase timer: + +```go +uploadTimer := time.AfterFunc(cascadeUploadTimeout, cancel) + +// send chunks... +// send metadata... + +if err := stream.CloseSend(); err != nil { /* ... */ } +uploadTimer.Stop() +``` + +3) Processing phase timer (server progress → final tx hash): + +```go +processingTimer := time.AfterFunc(cascadeProcessingTimeout, cancel) +defer processingTimer.Stop() + +for { + resp, err := stream.Recv() + // handle EOF, errors, progress, final tx hash +} +``` + +4) Error mapping and events: +- If cancellation occurs during Send loop → classify as upload timeout and emit `SDKUploadTimeout`. +- If cancellation occurs during Recv loop → classify as processing timeout and emit `SDKProcessingTimeout`. +- Surface distinct error messages and publish events accordingly. + +This approach requires no request‑struct changes and preserves existing call sites. It uses a single cancelable context across both phases and phase‑specific timers. + +## Additional Notes + +- Health checks use `connectionTimeout = 10s` during supernode discovery. +- gRPC client connect behavior: adds a `30s` deadline if none is present, waits up to `ConnWaitTime = 10s` per attempt with retries. +- Downloads use a separate `downloadTimeout = 5m` envelope. + +## Operational Guidance + +- For slow client links: raise `cascadeUploadTimeout` (e.g., 30–120m). Keep processing modest (e.g., 5–10m) unless chain finalization is known to stall. +- Server tuning is already generous; no server change required to support longer uploads. +- Telemetry: differentiate upload vs processing timeout in logs and emitted events for better retry behavior and user messaging. 
+- Retry policy: on upload timeout, prefer retrying with a different supernode; on processing timeout, consider whether the server might still finalize (idempotency depends on service semantics). + +## File/Code Reference Map + +- SDK + - `supernode/sdk/action/client.go` — entrypoints, no timeouts added. + - `supernode/sdk/task/manager.go` — detaches from caller context; creates and runs tasks. + - `supernode/sdk/task/timeouts.go` — `connectionTimeout` for health checks. + - `supernode/sdk/task/task.go` — discovery + health checks using `connectionTimeout`. + - `supernode/sdk/adapters/supernodeservice/timeouts.go` — upload/processing timeout constants. + - `supernode/sdk/adapters/supernodeservice/adapter.go` — upload and progress stream handling (phase timers + events). + - `supernode/sdk/net/factory.go` — client options tuned for streaming. + - `supernode/pkg/net/grpc/client` — connect timeout injection, readiness wait, retries, keepalive. + - `supernode/pkg/net/credentials/alts/handshake` — 30s handshake timeouts. + +- Supernode + - `supernode/supernode/node/supernode/server/server.go` — server options (16MB caps, windows, 20 streams). + - `supernode/supernode/node/action/server/cascade/cascade_action_server.go` — server-side Register/Download handlers (no per‑RPC timeout). + +## Events + +- Upload phase timeout: `SDKUploadTimeout`. +- Processing phase timeout: `SDKProcessingTimeout`. +# Cascade Registration Timeouts and Networking + +This document describes how the SDK applies timeouts and deadlines during cascade registration and download, and summarizes the relevant client and server networking defaults. + +## Time Budgets + +- Upload (adapter): `cascadeUploadTimeout = 60m` — client-side streaming of file chunks and metadata. +- Processing (adapter): `cascadeProcessingTimeout = 10m` — wait for server progress and final tx hash after upload completes. +- Discovery (task): `connectionTimeout = 10s` — per-supernode health probe during discovery. 
+- Download (task): `downloadTimeout = 5m` — envelope for cascade download. +- gRPC client connect: adds a `30s` deadline if none is present; readiness wait per attempt `ConnWaitTime = 10s` with retries and backoff. +- ALTS handshake: internal `30s` read timeouts on both client and server sides. +- Supernode gRPC server: no per-RPC timeout; keepalive is permissive (idle ping ~1h, ack timeout ~30m); flow-control and message-size tuning supports 4MB chunks. + +## Control Flow + +1) `sdk/action/client.go: ClientImpl.StartCascade(ctx, ...)` — forwards `ctx` to the Task Manager. +2) `sdk/task/manager.go: ManagerImpl.CreateCascadeTask(...)` — detaches from caller (`context.WithCancel(context.Background())`). +3) `sdk/task/cascade.go: CascadeTask.Run(ctx)` — validates file size, discovers healthy supernodes, attempts registration. +4) `sdk/task/task.go: BaseTask.fetchSupernodes` → `BaseTask.isServing` — health probe with `connectionTimeout = 10s` per node. +5) `sdk/task/cascade.go: attemptRegistration` — creates client and calls `RegisterCascade` with task context. +6) `sdk/adapters/supernodeservice/adapter.go: CascadeSupernodeRegister` — applies phase timers: + - Upload phase: send chunks and metadata; cancel if `cascadeUploadTimeout` elapses. + - Processing phase: receive server progress and final tx hash; cancel if `cascadeProcessingTimeout` elapses. + +## Events + +- `SDKUploadTimeout` — emitted when the upload phase exceeds its time budget. +- `SDKProcessingTimeout` — emitted when the post-upload processing exceeds its time budget. + +## Files and Constants + +- `supernode/sdk/adapters/supernodeservice/timeouts.go` — `cascadeUploadTimeout`, `cascadeProcessingTimeout`. +- `supernode/sdk/adapters/supernodeservice/adapter.go` — phased timers and stream handling. +- `supernode/sdk/task/timeouts.go` — `connectionTimeout` for discovery health checks. +- `supernode/sdk/task/task.go` — discovery and health probing. 
+- `supernode/sdk/task/download.go` — `downloadTimeout` for downloads. +- `supernode/pkg/net/grpc/client` — connect deadline injection, readiness wait, retries, keepalive defaults. +- `supernode/pkg/net/credentials/alts/handshake` — ALTS handshake timeouts. +- `supernode/supernode/node/supernode/server/server.go` — server stream tuning and keepalive parameters. + +## Tuning + +Adjust the constants in the SDK to fit deployment requirements (e.g., extend upload timeout for slower networks). Client/server defaults can be tuned as needed while keeping discovery responsive and long uploads reliable. diff --git a/sdk/event/types.go b/sdk/event/types.go index dd040f1f..2f7be099 100644 --- a/sdk/event/types.go +++ b/sdk/event/types.go @@ -14,7 +14,7 @@ type EventType string // These events are used to track the progress of tasks // and to notify subscribers about important changes in the system. const ( - SDKTaskStarted EventType = "sdk:started" + SDKTaskStarted EventType = "sdk:started" SDKSupernodesUnavailable EventType = "sdk:supernodes_unavailable" SDKSupernodesFound EventType = "sdk:supernodes_found" SDKRegistrationAttempt EventType = "sdk:registration_attempt" @@ -22,7 +22,15 @@ const ( SDKRegistrationSuccessful EventType = "sdk:registration_successful" SDKTaskTxHashReceived EventType = "sdk:txhash_received" SDKTaskCompleted EventType = "sdk:completed" - SDKTaskFailed EventType = "sdk:failed" + SDKTaskFailed EventType = "sdk:failed" + SDKConnectionEstablished EventType = "sdk:connection_established" + // Upload/processing phase events for cascade registration + SDKUploadStarted EventType = "sdk:upload_started" + SDKUploadCompleted EventType = "sdk:upload_completed" + SDKUploadFailed EventType = "sdk:upload_failed" // reason includes timeout + SDKProcessingStarted EventType = "sdk:processing_started" + SDKProcessingFailed EventType = "sdk:processing_failed" + SDKProcessingTimeout EventType = "sdk:processing_timeout" SDKDownloadAttempt EventType = 
"sdk:download_attempt" SDKDownloadFailure EventType = "sdk:download_failure" @@ -42,9 +50,10 @@ const ( SupernodeRQIDVerified EventType = "supernode:rqid_verified" SupernodeFinalizeSimulated EventType = "supernode:finalize_simulated" SupernodeArtefactsStored EventType = "supernode:artefacts_stored" - SupernodeActionFinalized EventType = "supernode:action_finalized" - SupernodeArtefactsDownloaded EventType = "supernode:artefacts_downloaded" - SupernodeUnknown EventType = "supernode:unknown" + SupernodeActionFinalized EventType = "supernode:action_finalized" + SupernodeArtefactsDownloaded EventType = "supernode:artefacts_downloaded" + SupernodeUnknown EventType = "supernode:unknown" + SupernodeFinalizeSimulationFailed EventType = "supernode:finalize_simulation_failed" ) // EventData is a map of event data attributes using standardized keys diff --git a/sdk/task/cascade.go b/sdk/task/cascade.go index cb46ef6e..a33a3acc 100644 --- a/sdk/task/cascade.go +++ b/sdk/task/cascade.go @@ -3,7 +3,6 @@ package task import ( "context" "fmt" - "time" "github.com/LumeraProtocol/supernode/v2/sdk/adapters/lumera" "github.com/LumeraProtocol/supernode/v2/sdk/adapters/supernodeservice" @@ -11,10 +10,7 @@ import ( "github.com/LumeraProtocol/supernode/v2/sdk/net" ) -const ( - registrationTimeout = 5 * time.Minute // Timeout for registration requests - connectionTimeout = 10 * time.Second // Timeout for connection requests -) +// connectionTimeout is defined in timeouts.go for the task package. 
type CascadeTask struct { BaseTask @@ -107,19 +103,23 @@ func (t *CascadeTask) registerWithSupernodes(ctx context.Context, supernodes lum } func (t *CascadeTask) attemptRegistration(ctx context.Context, _ int, sn lumera.Supernode, factory *net.ClientFactory, req *supernodeservice.CascadeSupernodeRegisterRequest) error { - client, err := factory.CreateClient(ctx, sn) - if err != nil { - return fmt.Errorf("create client %s: %w", sn.CosmosAddress, err) - } - defer client.Close(ctx) - - uploadCtx, cancel := context.WithTimeout(ctx, registrationTimeout) - defer cancel() - - req.EventLogger = func(ctx context.Context, evt event.EventType, msg string, data event.EventData) { - t.LogEvent(ctx, evt, msg, data) - } - resp, err := client.RegisterCascade(uploadCtx, req) + client, err := factory.CreateClient(ctx, sn) + if err != nil { + return fmt.Errorf("create client %s: %w", sn.CosmosAddress, err) + } + defer client.Close(ctx) + + // Emit connection established event for observability + t.LogEvent(ctx, event.SDKConnectionEstablished, "connection established", event.EventData{ + event.KeySupernode: sn.GrpcEndpoint, + event.KeySupernodeAddress: sn.CosmosAddress, + }) + + req.EventLogger = func(ctx context.Context, evt event.EventType, msg string, data event.EventData) { + t.LogEvent(ctx, evt, msg, data) + } + // Use ctx directly; per-phase timers are applied inside the adapter + resp, err := client.RegisterCascade(ctx, req) if err != nil { return fmt.Errorf("upload to %s: %w", sn.CosmosAddress, err) } diff --git a/sdk/task/helpers.go b/sdk/task/helpers.go index aacbac75..f887aeb2 100644 --- a/sdk/task/helpers.go +++ b/sdk/task/helpers.go @@ -10,6 +10,7 @@ import ( "strings" "github.com/LumeraProtocol/supernode/v2/sdk/adapters/lumera" + snsvc "github.com/LumeraProtocol/supernode/v2/sdk/adapters/supernodeservice" "github.com/LumeraProtocol/supernode/v2/sdk/net" ) @@ -129,7 +130,9 @@ func (m *ManagerImpl) checkSupernodesPeerConnectivity(ctx context.Context, block continue // Skip 
this supernode if we can't connect } - status, err := client.GetSupernodeStatus(ctx) + // Request peer info and P2P metrics to assess connectivity + ctxWithMetrics := snsvc.WithIncludeP2PMetrics(ctx) + status, err := client.GetSupernodeStatus(ctxWithMetrics) client.Close(ctx) if err != nil { continue // Skip this supernode if we can't get status diff --git a/sdk/task/timeouts.go b/sdk/task/timeouts.go new file mode 100644 index 00000000..f6e1e7e6 --- /dev/null +++ b/sdk/task/timeouts.go @@ -0,0 +1,8 @@ +package task + +import "time" + +// connectionTimeout bounds supernode health/connection probing. +// Keep this short to preserve snappy discovery without impacting long uploads. +const connectionTimeout = 10 * time.Second + diff --git a/supernode/node/supernode/server/status_server.go b/supernode/node/supernode/server/status_server.go index 09122465..62eba841 100644 --- a/supernode/node/supernode/server/status_server.go +++ b/supernode/node/supernode/server/status_server.go @@ -52,39 +52,39 @@ func (s *SupernodeServer) RegisterService(serviceName string, desc *grpc.Service // GetStatus implements SupernodeService.GetStatus func (s *SupernodeServer) GetStatus(ctx context.Context, req *pb.StatusRequest) (*pb.StatusResponse, error) { - // Get status from the common service - status, err := s.statusService.GetStatus(ctx) + // Get status from the common service; gate P2P metrics by request flag + status, err := s.statusService.GetStatus(ctx, req.GetIncludeP2PMetrics()) if err != nil { return nil, err } // Convert to protobuf response - response := &pb.StatusResponse{ - Version: status.Version, - UptimeSeconds: status.UptimeSeconds, - Resources: &pb.StatusResponse_Resources{ - Cpu: &pb.StatusResponse_Resources_CPU{ - UsagePercent: status.Resources.CPU.UsagePercent, - Cores: status.Resources.CPU.Cores, - }, - Memory: &pb.StatusResponse_Resources_Memory{ - TotalGb: status.Resources.Memory.TotalGB, - UsedGb: status.Resources.Memory.UsedGB, - AvailableGb: 
status.Resources.Memory.AvailableGB, - UsagePercent: status.Resources.Memory.UsagePercent, - }, - StorageVolumes: make([]*pb.StatusResponse_Resources_Storage, 0, len(status.Resources.Storage)), - HardwareSummary: status.Resources.HardwareSummary, - }, - RunningTasks: make([]*pb.StatusResponse_ServiceTasks, 0, len(status.RunningTasks)), - RegisteredServices: status.RegisteredServices, - Network: &pb.StatusResponse_Network{ - PeersCount: status.Network.PeersCount, - PeerAddresses: status.Network.PeerAddresses, - }, - Rank: status.Rank, - IpAddress: status.IPAddress, - } + response := &pb.StatusResponse{ + Version: status.Version, + UptimeSeconds: status.UptimeSeconds, + Resources: &pb.StatusResponse_Resources{ + Cpu: &pb.StatusResponse_Resources_CPU{ + UsagePercent: status.Resources.CPU.UsagePercent, + Cores: status.Resources.CPU.Cores, + }, + Memory: &pb.StatusResponse_Resources_Memory{ + TotalGb: status.Resources.Memory.TotalGB, + UsedGb: status.Resources.Memory.UsedGB, + AvailableGb: status.Resources.Memory.AvailableGB, + UsagePercent: status.Resources.Memory.UsagePercent, + }, + StorageVolumes: make([]*pb.StatusResponse_Resources_Storage, 0, len(status.Resources.Storage)), + HardwareSummary: status.Resources.HardwareSummary, + }, + RunningTasks: make([]*pb.StatusResponse_ServiceTasks, 0, len(status.RunningTasks)), + RegisteredServices: status.RegisteredServices, + Network: &pb.StatusResponse_Network{ + PeersCount: status.Network.PeersCount, + PeerAddresses: status.Network.PeerAddresses, + }, + Rank: status.Rank, + IpAddress: status.IPAddress, + } // Convert storage information for _, storage := range status.Resources.Storage { @@ -99,16 +99,85 @@ func (s *SupernodeServer) GetStatus(ctx context.Context, req *pb.StatusRequest) } // Convert service tasks - for _, service := range status.RunningTasks { - serviceTask := &pb.StatusResponse_ServiceTasks{ - ServiceName: service.ServiceName, - TaskIds: service.TaskIDs, - TaskCount: service.TaskCount, - } - 
response.RunningTasks = append(response.RunningTasks, serviceTask) - } - - return response, nil + for _, service := range status.RunningTasks { + serviceTask := &pb.StatusResponse_ServiceTasks{ + ServiceName: service.ServiceName, + TaskIds: service.TaskIDs, + TaskCount: service.TaskCount, + } + response.RunningTasks = append(response.RunningTasks, serviceTask) + } + + // Map optional P2P metrics + if req.GetIncludeP2PMetrics() { + pm := status.P2PMetrics + pbdht := &pb.StatusResponse_P2PMetrics_DhtMetrics{} + for _, p := range pm.DhtMetrics.StoreSuccessRecent { + pbdht.StoreSuccessRecent = append(pbdht.StoreSuccessRecent, &pb.StatusResponse_P2PMetrics_DhtMetrics_StoreSuccessPoint{ + TimeUnix: p.TimeUnix, + Requests: p.Requests, + Successful: p.Successful, + SuccessRate: p.SuccessRate, + }) + } + for _, p := range pm.DhtMetrics.BatchRetrieveRecent { + pbdht.BatchRetrieveRecent = append(pbdht.BatchRetrieveRecent, &pb.StatusResponse_P2PMetrics_DhtMetrics_BatchRetrievePoint{ + TimeUnix: p.TimeUnix, + Keys: p.Keys, + Required: p.Required, + FoundLocal: p.FoundLocal, + FoundNetwork: p.FoundNetwork, + DurationMs: p.DurationMS, + }) + } + pbdht.HotPathBannedSkips = pm.DhtMetrics.HotPathBannedSkips + pbdht.HotPathBanIncrements = pm.DhtMetrics.HotPathBanIncrements + + pbpm := &pb.StatusResponse_P2PMetrics{ + DhtMetrics: pbdht, + NetworkHandleMetrics: map[string]*pb.StatusResponse_P2PMetrics_HandleCounters{}, + ConnPoolMetrics: map[string]int64{}, + BanList: []*pb.StatusResponse_P2PMetrics_BanEntry{}, + Database: &pb.StatusResponse_P2PMetrics_DatabaseStats{}, + Disk: &pb.StatusResponse_P2PMetrics_DiskStatus{}, + } + + // Network handle metrics + for k, v := range pm.NetworkHandleMetrics { + pbpm.NetworkHandleMetrics[k] = &pb.StatusResponse_P2PMetrics_HandleCounters{ + Total: v.Total, + Success: v.Success, + Failure: v.Failure, + Timeout: v.Timeout, + } + } + // Conn pool metrics + for k, v := range pm.ConnPoolMetrics { + pbpm.ConnPoolMetrics[k] = v + } + // Ban list + for _, 
b := range pm.BanList { + pbpm.BanList = append(pbpm.BanList, &pb.StatusResponse_P2PMetrics_BanEntry{ + Id: b.ID, + Ip: b.IP, + Port: b.Port, + Count: b.Count, + CreatedAtUnix: b.CreatedAtUnix, + AgeSeconds: b.AgeSeconds, + }) + } + // Database + pbpm.Database.P2PDbSizeMb = pm.Database.P2PDBSizeMB + pbpm.Database.P2PDbRecordsCount = pm.Database.P2PDBRecordsCount + // Disk + pbpm.Disk.AllMb = pm.Disk.AllMB + pbpm.Disk.UsedMb = pm.Disk.UsedMB + pbpm.Disk.FreeMb = pm.Disk.FreeMB + + response.P2PMetrics = pbpm + } + + return response, nil } // ListServices implements SupernodeService.ListServices diff --git a/supernode/services/cascade/adaptors/mocks/p2p_mock.go b/supernode/services/cascade/adaptors/mocks/p2p_mock.go index ad6c297e..025109b2 100644 --- a/supernode/services/cascade/adaptors/mocks/p2p_mock.go +++ b/supernode/services/cascade/adaptors/mocks/p2p_mock.go @@ -43,13 +43,12 @@ func (m *MockP2PService) EXPECT() *MockP2PServiceMockRecorder { } // StoreArtefacts mocks base method. -func (m *MockP2PService) StoreArtefacts(ctx context.Context, req adaptors.StoreArtefactsRequest, f logtrace.Fields) (float64, int, error) { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "StoreArtefacts", ctx, req, f) - ret0, _ := ret[0].(float64) - ret1, _ := ret[1].(int) - ret2, _ := ret[2].(error) - return ret0, ret1, ret2 +func (m *MockP2PService) StoreArtefacts(ctx context.Context, req adaptors.StoreArtefactsRequest, f logtrace.Fields) (adaptors.StoreArtefactsMetrics, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "StoreArtefacts", ctx, req, f) + ret0, _ := ret[0].(adaptors.StoreArtefactsMetrics) + ret1, _ := ret[1].(error) + return ret0, ret1 } // StoreArtefacts indicates an expected call of StoreArtefacts. 
diff --git a/supernode/services/cascade/adaptors/p2p.go b/supernode/services/cascade/adaptors/p2p.go index 9d1e2779..fcaad76a 100644 --- a/supernode/services/cascade/adaptors/p2p.go +++ b/supernode/services/cascade/adaptors/p2p.go @@ -28,15 +28,17 @@ const ( // //go:generate mockgen -destination=mocks/p2p_mock.go -package=cascadeadaptormocks -source=p2p.go type P2PService interface { - // StoreArtefacts stores ID files and RaptorQ symbols, returning an aggregated - // network success rate percentage across all store batches. - // - // Aggregation model: + // StoreArtefacts stores ID files and RaptorQ symbols. + // + // Aggregation model: // - Each underlying StoreBatch returns (ratePct, requests) where requests is - // the number of node RPCs. The aggregated rate is computed as a weighted - // average by requests across metadata and symbol batches, which yields the - // global success rate across all node calls attempted for this action. - StoreArtefacts(ctx context.Context, req StoreArtefactsRequest, f logtrace.Fields) (float64, int, error) + // the number of node RPCs. The aggregated success rate can be computed as + // a weighted average by requests across metadata and symbol batches, + // yielding a global success view across all node calls attempted for this action. + // See implementation notes for item‑weighted aggregation currently in use. + // + // Returns detailed metrics for both categories along with an aggregated view. + StoreArtefacts(ctx context.Context, req StoreArtefactsRequest, f logtrace.Fields) (StoreArtefactsMetrics, error) } // p2pImpl is the default implementation of the P2PService interface. 
@@ -57,33 +59,60 @@ type StoreArtefactsRequest struct { SymbolsDir string } -func (p *p2pImpl) StoreArtefacts(ctx context.Context, req StoreArtefactsRequest, f logtrace.Fields) (float64, int, error) { - logtrace.Info(ctx, "About to store ID files", logtrace.Fields{"taskID": req.TaskID, "fileCount": len(req.IDFiles)}) - - metaRate, metaReqs, err := p.storeCascadeMetadata(ctx, req.IDFiles, req.TaskID) - if err != nil { - return 0, 0, errors.Wrap(err, "failed to store ID files") - } - logtrace.Info(ctx, "id files have been stored", f) - - // NOTE: For now we aggregate by item count (ID files + symbol count). - // TODO(move-to-request-weighted): Switch aggregation to request-weighted once - // external consumers and metrics expectations are updated. We already return - // totalRequests so the event/logs can include accurate request counts. - symRate, symCount, symReqs, err := p.storeCascadeSymbols(ctx, req.TaskID, req.ActionID, req.SymbolsDir) - if err != nil { - return 0, 0, errors.Wrap(err, "error storing raptor-q symbols") - } - logtrace.Info(ctx, "raptor-q symbols have been stored", f) - - // Aggregate: weight by item counts (ID files + symbols) for now. - totalItems := len(req.IDFiles) + symCount - aggRate := 0.0 - if totalItems > 0 { - aggRate = ((metaRate * float64(len(req.IDFiles))) + (symRate * float64(symCount))) / float64(totalItems) - } - totalRequests := metaReqs + symReqs - return aggRate, totalRequests, nil +// StoreArtefactsMetrics captures detailed outcomes of metadata and symbols storage. 
+type StoreArtefactsMetrics struct { + // Metadata (ID files) + MetaRate float64 // percentage (0–100) + MetaRequests int // number of node RPCs attempted for metadata + MetaCount int // number of metadata files attempted + + // Symbols + SymRate float64 // percentage (0–100) across all symbol batches (item-weighted) + SymRequests int // total node RPCs for symbol batches + SymCount int // total symbols processed + + // Aggregated view + AggregatedRate float64 // item-weighted across metadata and symbols + TotalRequests int // MetaRequests + SymRequests +} + +func (p *p2pImpl) StoreArtefacts(ctx context.Context, req StoreArtefactsRequest, f logtrace.Fields) (StoreArtefactsMetrics, error) { + logtrace.Info(ctx, "About to store ID files", logtrace.Fields{"taskID": req.TaskID, "fileCount": len(req.IDFiles)}) + + metaRate, metaReqs, err := p.storeCascadeMetadata(ctx, req.IDFiles, req.TaskID) + if err != nil { + return StoreArtefactsMetrics{}, errors.Wrap(err, "failed to store ID files") + } + logtrace.Info(ctx, "id files have been stored", f) + + // NOTE: For now we aggregate by item count (ID files + symbol count). + // TODO(move-to-request-weighted): Switch aggregation to request-weighted once + // external consumers and metrics expectations are updated. We already return + // totalRequests so the event/logs can include accurate request counts. + symRate, symCount, symReqs, err := p.storeCascadeSymbols(ctx, req.TaskID, req.ActionID, req.SymbolsDir) + if err != nil { + return StoreArtefactsMetrics{}, errors.Wrap(err, "error storing raptor-q symbols") + } + logtrace.Info(ctx, "raptor-q symbols have been stored", f) + + // Aggregate: weight by item counts (ID files + symbols) for now. 
+ metaCount := len(req.IDFiles) + totalItems := metaCount + symCount + aggRate := 0.0 + if totalItems > 0 { + aggRate = ((metaRate * float64(metaCount)) + (symRate * float64(symCount))) / float64(totalItems) + } + totalRequests := metaReqs + symReqs + return StoreArtefactsMetrics{ + MetaRate: metaRate, + MetaRequests: metaReqs, + MetaCount: metaCount, + SymRate: symRate, + SymRequests: symReqs, + SymCount: symCount, + AggregatedRate: aggRate, + TotalRequests: totalRequests, + }, nil } // storeCascadeMetadata stores cascade metadata (ID files) via P2P. @@ -132,39 +161,39 @@ func (p *p2pImpl) storeCascadeSymbols(ctx context.Context, taskID, actionID stri logtrace.Info(ctx, "storing RaptorQ symbols", logtrace.Fields{"count": len(keys)}) - /* stream in fixed-size batches -------------------------------------- */ - sumWeightedRates := 0.0 - totalSymbols := 0 - totalRequests := 0 - for start := 0; start < len(keys); { - end := start + loadSymbolsBatchSize - if end > len(keys) { - end = len(keys) - } - batch := keys[start:end] - rate, requests, count, err := p.storeSymbolsInP2P(ctx, taskID, symbolsDir, batch) - if err != nil { - return rate, totalSymbols, totalRequests, err - } - sumWeightedRates += rate * float64(count) - totalSymbols += count - totalRequests += requests - start = end - } - - if err := p.rqStore.UpdateIsFirstBatchStored(actionID); err != nil { - return 0, totalSymbols, totalRequests, fmt.Errorf("update first-batch flag: %w", err) - } - logtrace.Info(ctx, "finished storing RaptorQ symbols", logtrace.Fields{ - "curr-time": time.Now().UTC(), - "count": len(keys), - }) - - aggRate := 0.0 - if totalSymbols > 0 { - aggRate = sumWeightedRates / float64(totalSymbols) - } - return aggRate, totalSymbols, totalRequests, nil + /* stream in fixed-size batches -------------------------------------- */ + sumWeightedRates := 0.0 + totalSymbols := 0 + totalRequests := 0 + for start := 0; start < len(keys); { + end := start + loadSymbolsBatchSize + if end > len(keys) { + 
end = len(keys) + } + batch := keys[start:end] + rate, requests, count, err := p.storeSymbolsInP2P(ctx, taskID, symbolsDir, batch) + if err != nil { + return rate, totalSymbols, totalRequests, err + } + sumWeightedRates += rate * float64(count) + totalSymbols += count + totalRequests += requests + start = end + } + + if err := p.rqStore.UpdateIsFirstBatchStored(actionID); err != nil { + return 0, totalSymbols, totalRequests, fmt.Errorf("update first-batch flag: %w", err) + } + logtrace.Info(ctx, "finished storing RaptorQ symbols", logtrace.Fields{ + "curr-time": time.Now().UTC(), + "count": len(keys), + }) + + aggRate := 0.0 + if totalSymbols > 0 { + aggRate = sumWeightedRates / float64(totalSymbols) + } + return aggRate, totalSymbols, totalRequests, nil } func walkSymbolTree(root string) ([]string, error) { @@ -198,24 +227,24 @@ func walkSymbolTree(root string) ([]string, error) { func (c *p2pImpl) storeSymbolsInP2P(ctx context.Context, taskID, root string, fileKeys []string) (float64, int, int, error) { logtrace.Info(ctx, "loading batch symbols", logtrace.Fields{"count": len(fileKeys)}) - symbols, err := utils.LoadSymbols(root, fileKeys) - if err != nil { - return 0, 0, 0, fmt.Errorf("load symbols: %w", err) - } + symbols, err := utils.LoadSymbols(root, fileKeys) + if err != nil { + return 0, 0, 0, fmt.Errorf("load symbols: %w", err) + } symCtx, cancel := context.WithTimeout(ctx, 5*time.Minute) defer cancel() - rate, requests, err := c.p2p.StoreBatch(symCtx, symbols, storage.P2PDataRaptorQSymbol, taskID) - if err != nil { - return rate, requests, len(symbols), fmt.Errorf("p2p store batch: %w", err) - } - logtrace.Info(ctx, "stored batch symbols", logtrace.Fields{"count": len(symbols)}) + rate, requests, err := c.p2p.StoreBatch(symCtx, symbols, storage.P2PDataRaptorQSymbol, taskID) + if err != nil { + return rate, requests, len(symbols), fmt.Errorf("p2p store batch: %w", err) + } + logtrace.Info(ctx, "stored batch symbols", logtrace.Fields{"count": len(symbols)}) 
- if err := utils.DeleteSymbols(ctx, root, fileKeys); err != nil { - return rate, requests, len(symbols), fmt.Errorf("delete symbols: %w", err) - } - logtrace.Info(ctx, "deleted batch symbols", logtrace.Fields{"count": len(symbols)}) + if err := utils.DeleteSymbols(ctx, root, fileKeys); err != nil { + return rate, requests, len(symbols), fmt.Errorf("delete symbols: %w", err) + } + logtrace.Info(ctx, "deleted batch symbols", logtrace.Fields{"count": len(symbols)}) - return rate, requests, len(symbols), nil + return rate, requests, len(symbols), nil } diff --git a/supernode/services/cascade/events.go b/supernode/services/cascade/events.go index d878fc25..54006053 100644 --- a/supernode/services/cascade/events.go +++ b/supernode/services/cascade/events.go @@ -3,18 +3,19 @@ package cascade type SupernodeEventType int const ( - SupernodeEventTypeUNKNOWN SupernodeEventType = 0 - SupernodeEventTypeActionRetrieved SupernodeEventType = 1 - SupernodeEventTypeActionFeeVerified SupernodeEventType = 2 - SupernodeEventTypeTopSupernodeCheckPassed SupernodeEventType = 3 - SupernodeEventTypeMetadataDecoded SupernodeEventType = 4 - SupernodeEventTypeDataHashVerified SupernodeEventType = 5 - SupernodeEventTypeInputEncoded SupernodeEventType = 6 - SupernodeEventTypeSignatureVerified SupernodeEventType = 7 - SupernodeEventTypeRQIDsGenerated SupernodeEventType = 8 - SupernodeEventTypeRqIDsVerified SupernodeEventType = 9 - SupernodeEventTypeFinalizeSimulated SupernodeEventType = 10 - SupernodeEventTypeArtefactsStored SupernodeEventType = 11 - SupernodeEventTypeActionFinalized SupernodeEventType = 12 - SupernodeEventTypeArtefactsDownloaded SupernodeEventType = 13 + SupernodeEventTypeUNKNOWN SupernodeEventType = 0 + SupernodeEventTypeActionRetrieved SupernodeEventType = 1 + SupernodeEventTypeActionFeeVerified SupernodeEventType = 2 + SupernodeEventTypeTopSupernodeCheckPassed SupernodeEventType = 3 + SupernodeEventTypeMetadataDecoded SupernodeEventType = 4 + 
SupernodeEventTypeDataHashVerified SupernodeEventType = 5 + SupernodeEventTypeInputEncoded SupernodeEventType = 6 + SupernodeEventTypeSignatureVerified SupernodeEventType = 7 + SupernodeEventTypeRQIDsGenerated SupernodeEventType = 8 + SupernodeEventTypeRqIDsVerified SupernodeEventType = 9 + SupernodeEventTypeFinalizeSimulated SupernodeEventType = 10 + SupernodeEventTypeArtefactsStored SupernodeEventType = 11 + SupernodeEventTypeActionFinalized SupernodeEventType = 12 + SupernodeEventTypeArtefactsDownloaded SupernodeEventType = 13 + SupernodeEventTypeFinalizeSimulationFailed SupernodeEventType = 14 ) diff --git a/supernode/services/cascade/helper.go b/supernode/services/cascade/helper.go index 42ef0055..eed20700 100644 --- a/supernode/services/cascade/helper.go +++ b/supernode/services/cascade/helper.go @@ -5,6 +5,7 @@ import ( "context" "encoding/base64" "fmt" + stdmath "math" "strconv" "strings" @@ -177,13 +178,13 @@ func (task *CascadeRegistrationTask) generateRQIDFiles(ctx context.Context, meta // - Underlying batches return (ratePct, requests) where `requests` is the number // of node RPCs attempted. The adaptor computes a weighted average by requests // across all batches, reflecting the overall network success rate. 
-func (task *CascadeRegistrationTask) storeArtefacts(ctx context.Context, actionID string, idFiles [][]byte, symbolsDir string, f logtrace.Fields) (float64, int, error) { - return task.P2P.StoreArtefacts(ctx, adaptors.StoreArtefactsRequest{ - IDFiles: idFiles, - SymbolsDir: symbolsDir, - TaskID: task.ID(), - ActionID: actionID, - }, f) +func (task *CascadeRegistrationTask) storeArtefacts(ctx context.Context, actionID string, idFiles [][]byte, symbolsDir string, f logtrace.Fields) (adaptors.StoreArtefactsMetrics, error) { + return task.P2P.StoreArtefacts(ctx, adaptors.StoreArtefactsRequest{ + IDFiles: idFiles, + SymbolsDir: symbolsDir, + TaskID: task.ID(), + ActionID: actionID, + }, f) } func (task *CascadeRegistrationTask) wrapErr(ctx context.Context, msg string, err error, f logtrace.Fields) error { @@ -199,6 +200,27 @@ func (task *CascadeRegistrationTask) wrapErr(ctx context.Context, msg string, er return status.Errorf(codes.Internal, "%s", msg) } +// emitArtefactsStored builds a single-line metrics summary and emits the +// SupernodeEventTypeArtefactsStored event while logging the metrics line. 
+func (task *CascadeRegistrationTask) emitArtefactsStored( + ctx context.Context, + metrics adaptors.StoreArtefactsMetrics, + fields logtrace.Fields, + send func(resp *RegisterResponse) error, +) { + ok := int(stdmath.Round(metrics.AggregatedRate / 100.0 * float64(metrics.TotalRequests))) + fail := metrics.TotalRequests - ok + line := fmt.Sprintf( + "artefacts stored | success_rate=%.2f%% agg_rate=%.2f%% total_reqs=%d ok=%d fail=%d meta_rate=%.2f%% meta_reqs=%d meta_count=%d sym_rate=%.2f%% sym_reqs=%d sym_count=%d", + metrics.AggregatedRate, metrics.AggregatedRate, metrics.TotalRequests, ok, fail, + metrics.MetaRate, metrics.MetaRequests, metrics.MetaCount, + metrics.SymRate, metrics.SymRequests, metrics.SymCount, + ) + fields["metrics"] = line + logtrace.Info(ctx, "artefacts have been stored", fields) + task.streamEvent(SupernodeEventTypeArtefactsStored, line, "", send) +} + // extractSignatureAndFirstPart extracts the signature and first part from the encoded data // data is expected to be in format: b64(JSON(Layout)).Signature func extractSignatureAndFirstPart(data string) (encodedMetadata string, signature string, err error) { diff --git a/supernode/services/cascade/register.go b/supernode/services/cascade/register.go index 4b5d62a2..b9c9de83 100644 --- a/supernode/services/cascade/register.go +++ b/supernode/services/cascade/register.go @@ -1,10 +1,8 @@ package cascade import ( - "context" - "fmt" - "math" - "os" + "context" + "os" "github.com/LumeraProtocol/supernode/v2/pkg/logtrace" "github.com/LumeraProtocol/supernode/v2/supernode/services/common" @@ -147,38 +145,30 @@ func (task *CascadeRegistrationTask) Register( task.streamEvent(SupernodeEventTypeRqIDsVerified, "rq-ids have been verified", "", send) /* 10. 
Simulate finalize to avoid storing artefacts if it would fail ---------- */ - if _, err := task.LumeraClient.SimulateFinalizeAction(ctx, action.ActionID, rqidResp.RQIDs); err != nil { - fields[logtrace.FieldError] = err.Error() - logtrace.Info(ctx, "finalize action simulation failed", fields) - return task.wrapErr(ctx, "finalize action simulation failed", err, fields) - } + if _, err := task.LumeraClient.SimulateFinalizeAction(ctx, action.ActionID, rqidResp.RQIDs); err != nil { + fields[logtrace.FieldError] = err.Error() + logtrace.Info(ctx, "finalize action simulation failed", fields) + // Emit explicit simulation failure event for client visibility + task.streamEvent(SupernodeEventTypeFinalizeSimulationFailed, "finalize action simulation failed", "", send) + return task.wrapErr(ctx, "finalize action simulation failed", err, fields) + } logtrace.Info(ctx, "finalize action simulation passed", fields) // Transmit as a standard event so SDK can propagate it (dedicated type) task.streamEvent(SupernodeEventTypeFinalizeSimulated, "finalize action simulation passed", "", send) /* 11. Persist artefacts -------------------------------------------------------- */ - // Persist artefacts to the P2P network. The returned `successRate` is a - // request-weighted percentage (0–100) computed across all underlying P2P - // store batches for this action. Each batch contributes its success rate - // weighted by the number of node RPCs attempted, so the aggregate reflects - // overall network behavior rather than item counts. - successRate, totalRequests, err := task.storeArtefacts(ctx, action.ActionID, rqidResp.RedundantMetadataFiles, encResp.SymbolsDir, fields) + // Persist artefacts to the P2P network. + // Aggregation model (context): + // - Each underlying StoreBatch returns (ratePct, requests) where requests is + // the number of node RPCs. 
The aggregated success rate can be computed as a + // weighted average by requests across metadata and symbol batches, yielding + // an overall network success view for the action. + metrics, err := task.storeArtefacts(ctx, action.ActionID, rqidResp.RedundantMetadataFiles, encResp.SymbolsDir, fields) if err != nil { return err } - // Attach the success rate to structured fields for observability. This value - // is best-effort and non-fatal so long as it meets the configured minimum in - // lower layers; failures below threshold would already propagate an error. - fields["success_rate"] = successRate - fields["requests"] = totalRequests - logtrace.Info(ctx, "artefacts have been stored", fields) - // Emit compact, rich metrics in the event message for external visibility. - // ok and fail are derived counts based on the measured rate and requests. - // TODO(move-to-request-weighted): Once aggregation switches to request-weighted, - // these derived counts will align exactly with the per-request success rate. - ok := int(math.Round(successRate / 100.0 * float64(totalRequests))) - fail := totalRequests - ok - task.streamEvent(SupernodeEventTypeArtefactsStored, fmt.Sprintf("artefacts stored | rate=%.2f%% req=%d ok=%d fail=%d", successRate, totalRequests, ok, fail), "", send) + // Emit single-line metrics via helper to keep Register clean + task.emitArtefactsStored(ctx, metrics, fields, send) resp, err := task.LumeraClient.FinalizeAction(ctx, action.ActionID, rqidResp.RQIDs) if err != nil { diff --git a/supernode/services/cascade/register_test.go b/supernode/services/cascade/register_test.go index 0bebd232..82af32fe 100644 --- a/supernode/services/cascade/register_test.go +++ b/supernode/services/cascade/register_test.go @@ -104,10 +104,19 @@ func TestCascadeRegistrationTask_Register(t *testing.T) { Metadata: codecpkg.Layout{Blocks: []codecpkg.Block{{BlockID: 1, Hash: "abc"}}}, }, nil) - // 8. Store artefacts (returns success rate, requests) + // 8. 
Store artefacts (returns detailed metrics) p2p.EXPECT(). StoreArtefacts(gomock.Any(), gomock.Any(), gomock.Any()). - Return(95.0, 120, nil) + Return(adaptors.StoreArtefactsMetrics{ + MetaRate: 96.0, + MetaRequests: 20, + MetaCount: 2, + SymRate: 94.0, + SymRequests: 100, + SymCount: 1000, + AggregatedRate: 95.0, + TotalRequests: 120, + }, nil) }, expectedError: "", expectedEvents: 12, diff --git a/supernode/services/cascade/status.go b/supernode/services/cascade/status.go index 13cc9f9a..b5633a45 100644 --- a/supernode/services/cascade/status.go +++ b/supernode/services/cascade/status.go @@ -18,5 +18,5 @@ func (service *CascadeService) GetStatus(ctx context.Context) (StatusResponse, e statusService.RegisterTaskProvider(service) // Get the status from the common service - return statusService.GetStatus(ctx) + return statusService.GetStatus(ctx, false) } diff --git a/supernode/services/common/supernode/service.go b/supernode/services/common/supernode/service.go index 56dfc5ba..c167eec2 100644 --- a/supernode/services/common/supernode/service.go +++ b/supernode/services/common/supernode/service.go @@ -1,15 +1,16 @@ package supernode import ( - "context" - "fmt" - "time" - - "github.com/LumeraProtocol/supernode/v2/p2p" - "github.com/LumeraProtocol/supernode/v2/p2p/kademlia" - "github.com/LumeraProtocol/supernode/v2/pkg/logtrace" - "github.com/LumeraProtocol/supernode/v2/pkg/lumera" - "github.com/LumeraProtocol/supernode/v2/supernode/config" + "context" + "fmt" + "time" + + "github.com/LumeraProtocol/supernode/v2/p2p" + "github.com/LumeraProtocol/supernode/v2/p2p/kademlia" + "github.com/LumeraProtocol/supernode/v2/pkg/logtrace" + "github.com/LumeraProtocol/supernode/v2/pkg/lumera" + "github.com/LumeraProtocol/supernode/v2/pkg/utils" + "github.com/LumeraProtocol/supernode/v2/supernode/config" ) // Version is the supernode version, set by the main application @@ -48,7 +49,7 @@ func (s *SupernodeStatusService) RegisterTaskProvider(provider TaskProvider) { // GetStatus 
returns the current system status including all registered services // This method collects CPU metrics, memory usage, and task information from all providers -func (s *SupernodeStatusService) GetStatus(ctx context.Context) (StatusResponse, error) { +func (s *SupernodeStatusService) GetStatus(ctx context.Context, includeP2PMetrics bool) (StatusResponse, error) { fields := logtrace.Fields{ logtrace.FieldMethod: "GetStatus", logtrace.FieldModule: "SupernodeStatusService", @@ -124,32 +125,130 @@ func (s *SupernodeStatusService) GetStatus(ctx context.Context) (StatusResponse, PeerAddresses: []string{}, } - // Collect P2P network information - if s.p2pService != nil { - p2pStats, err := s.p2pService.Stats(ctx) - if err != nil { - // Log error but continue - non-critical - logtrace.Error(ctx, "failed to get p2p stats", logtrace.Fields{logtrace.FieldError: err.Error()}) - } else { - if dhtStats, ok := p2pStats["dht"].(map[string]interface{}); ok { - if peersCount, ok := dhtStats["peers_count"].(int); ok { - resp.Network.PeersCount = int32(peersCount) - } + // Prepare P2P metrics container (always present in response) + metrics := P2PMetrics{ + NetworkHandleMetrics: map[string]HandleCounters{}, + ConnPoolMetrics: map[string]int64{}, + BanList: []BanEntry{}, + } - // Extract peer addresses - if peers, ok := dhtStats["peers"].([]*kademlia.Node); ok { - resp.Network.PeerAddresses = make([]string, 0, len(peers)) - for _, peer := range peers { - // Format peer address as "ID@IP:Port" - peerAddr := fmt.Sprintf("%s@%s:%d", string(peer.ID), peer.IP, peer.Port) - resp.Network.PeerAddresses = append(resp.Network.PeerAddresses, peerAddr) - } - } else { - resp.Network.PeerAddresses = []string{} - } - } - } - } + // Collect P2P network information and metrics (fill when available and requested) + if includeP2PMetrics && s.p2pService != nil { + p2pStats, err := s.p2pService.Stats(ctx) + if err != nil { + // Log error but continue - non-critical + logtrace.Error(ctx, "failed to get p2p 
stats", logtrace.Fields{logtrace.FieldError: err.Error()}) + } else { + if dhtStats, ok := p2pStats["dht"].(map[string]interface{}); ok { + if peersCount, ok := dhtStats["peers_count"].(int); ok { + resp.Network.PeersCount = int32(peersCount) + } + + // Extract peer addresses + if peers, ok := dhtStats["peers"].([]*kademlia.Node); ok { + resp.Network.PeerAddresses = make([]string, 0, len(peers)) + for _, peer := range peers { + // Format peer address as "ID@IP:Port" + peerAddr := fmt.Sprintf("%s@%s:%d", string(peer.ID), peer.IP, peer.Port) + resp.Network.PeerAddresses = append(resp.Network.PeerAddresses, peerAddr) + } + } else { + resp.Network.PeerAddresses = []string{} + } + } + + // Disk info + if du, ok := p2pStats["disk-info"].(utils.DiskStatus); ok { + metrics.Disk = DiskStatus{AllMB: du.All, UsedMB: du.Used, FreeMB: du.Free} + } else if duPtr, ok := p2pStats["disk-info"].(*utils.DiskStatus); ok && duPtr != nil { + metrics.Disk = DiskStatus{AllMB: duPtr.All, UsedMB: duPtr.Used, FreeMB: duPtr.Free} + } + + // Ban list + if bans, ok := p2pStats["ban-list"].([]kademlia.BanSnapshot); ok { + for _, b := range bans { + metrics.BanList = append(metrics.BanList, BanEntry{ + ID: b.ID, + IP: b.IP, + Port: uint32(b.Port), + Count: int32(b.Count), + CreatedAtUnix: b.CreatedAt.Unix(), + AgeSeconds: int64(b.Age.Seconds()), + }) + } + } + + // Conn pool metrics + if pool, ok := p2pStats["conn-pool"].(map[string]int64); ok { + for k, v := range pool { + metrics.ConnPoolMetrics[k] = v + } + } + + // DHT metrics and database/network counters live inside dht map + if dhtStats, ok := p2pStats["dht"].(map[string]interface{}); ok { + // Database + if db, ok := dhtStats["database"].(map[string]interface{}); ok { + var sizeMB float64 + if v, ok := db["p2p_db_size"].(float64); ok { + sizeMB = v + } + var recs int64 + switch v := db["p2p_db_records_count"].(type) { + case int: + recs = int64(v) + case int64: + recs = v + case float64: + recs = int64(v) + } + metrics.Database = 
DatabaseStats{P2PDBSizeMB: sizeMB, P2PDBRecordsCount: recs} + } + + // Network handle metrics + if nhm, ok := dhtStats["network"].(map[string]kademlia.HandleCounters); ok { + for k, c := range nhm { + metrics.NetworkHandleMetrics[k] = HandleCounters{Total: c.Total, Success: c.Success, Failure: c.Failure, Timeout: c.Timeout} + } + } else if nhmI, ok := dhtStats["network"].(map[string]interface{}); ok { + for k, vi := range nhmI { + if c, ok := vi.(kademlia.HandleCounters); ok { + metrics.NetworkHandleMetrics[k] = HandleCounters{Total: c.Total, Success: c.Success, Failure: c.Failure, Timeout: c.Timeout} + } + } + } + } + + // DHT rolling metrics snapshot is attached at top-level under dht_metrics + if snap, ok := p2pStats["dht_metrics"].(kademlia.DHTMetricsSnapshot); ok { + // Store success + for _, p := range snap.StoreSuccessRecent { + metrics.DhtMetrics.StoreSuccessRecent = append(metrics.DhtMetrics.StoreSuccessRecent, StoreSuccessPoint{ + TimeUnix: p.Time.Unix(), + Requests: int32(p.Requests), + Successful: int32(p.Successful), + SuccessRate: p.SuccessRate, + }) + } + // Batch retrieve + for _, p := range snap.BatchRetrieveRecent { + metrics.DhtMetrics.BatchRetrieveRecent = append(metrics.DhtMetrics.BatchRetrieveRecent, BatchRetrievePoint{ + TimeUnix: p.Time.Unix(), + Keys: int32(p.Keys), + Required: int32(p.Required), + FoundLocal: int32(p.FoundLocal), + FoundNetwork: int32(p.FoundNet), + DurationMS: p.Duration.Milliseconds(), + }) + } + metrics.DhtMetrics.HotPathBannedSkips = snap.HotPathBannedSkips + metrics.DhtMetrics.HotPathBanIncrements = snap.HotPathBanIncrements + } + } + } + + // Always include metrics (may be empty if not available) + resp.P2PMetrics = metrics // Calculate rank from top supernodes if s.lumeraClient != nil && s.config != nil { diff --git a/supernode/services/common/supernode/service_test.go b/supernode/services/common/supernode/service_test.go index 1969be2f..e2f82287 100644 --- a/supernode/services/common/supernode/service_test.go +++ 
b/supernode/services/common/supernode/service_test.go @@ -14,7 +14,7 @@ func TestSupernodeStatusService(t *testing.T) { t.Run("empty service", func(t *testing.T) { statusService := NewSupernodeStatusService(nil, nil, nil) - resp, err := statusService.GetStatus(ctx) + resp, err := statusService.GetStatus(ctx, false) assert.NoError(t, err) // Should have version info @@ -61,7 +61,7 @@ func TestSupernodeStatusService(t *testing.T) { } statusService.RegisterTaskProvider(mockProvider) - resp, err := statusService.GetStatus(ctx) + resp, err := statusService.GetStatus(ctx, false) assert.NoError(t, err) // Should have one service @@ -91,7 +91,7 @@ func TestSupernodeStatusService(t *testing.T) { statusService.RegisterTaskProvider(cascadeProvider) statusService.RegisterTaskProvider(senseProvider) - resp, err := statusService.GetStatus(ctx) + resp, err := statusService.GetStatus(ctx, false) assert.NoError(t, err) // Should have two services @@ -127,7 +127,7 @@ func TestSupernodeStatusService(t *testing.T) { } statusService.RegisterTaskProvider(mockProvider) - resp, err := statusService.GetStatus(ctx) + resp, err := statusService.GetStatus(ctx, false) assert.NoError(t, err) // Should have one service diff --git a/supernode/services/common/supernode/types.go b/supernode/services/common/supernode/types.go index fffeba8b..032aa0ee 100644 --- a/supernode/services/common/supernode/types.go +++ b/supernode/services/common/supernode/types.go @@ -3,14 +3,15 @@ package supernode // StatusResponse represents the complete system status information // with clear organization of resources and services type StatusResponse struct { - Version string // Supernode version - UptimeSeconds uint64 // Uptime in seconds - Resources Resources // System resource information - RunningTasks []ServiceTasks // Services with currently running tasks - RegisteredServices []string // All registered/available services - Network NetworkInfo // P2P network information - Rank int32 // Rank in the top supernodes 
list (0 if not in top list) - IPAddress string // Supernode IP address with port (e.g., "192.168.1.1:4445") + Version string // Supernode version + UptimeSeconds uint64 // Uptime in seconds + Resources Resources // System resource information + RunningTasks []ServiceTasks // Services with currently running tasks + RegisteredServices []string // All registered/available services + Network NetworkInfo // P2P network information + Rank int32 // Rank in the top supernodes list (0 if not in top list) + IPAddress string // Supernode IP address with port (e.g., "192.168.1.1:4445") + P2PMetrics P2PMetrics // Detailed P2P metrics snapshot } // Resources contains system resource metrics @@ -53,8 +54,68 @@ type ServiceTasks struct { // NetworkInfo contains P2P network information type NetworkInfo struct { - PeersCount int32 // Number of connected peers in P2P network - PeerAddresses []string // List of connected peer addresses (optional, may be empty for privacy) + PeersCount int32 // Number of connected peers in P2P network + PeerAddresses []string // List of connected peer addresses (optional, may be empty for privacy) +} + +// P2PMetrics mirrors the proto P2P metrics for status API +type P2PMetrics struct { + DhtMetrics DhtMetrics + NetworkHandleMetrics map[string]HandleCounters + ConnPoolMetrics map[string]int64 + BanList []BanEntry + Database DatabaseStats + Disk DiskStatus +} + +type StoreSuccessPoint struct { + TimeUnix int64 + Requests int32 + Successful int32 + SuccessRate float64 +} + +type BatchRetrievePoint struct { + TimeUnix int64 + Keys int32 + Required int32 + FoundLocal int32 + FoundNetwork int32 + DurationMS int64 +} + +type DhtMetrics struct { + StoreSuccessRecent []StoreSuccessPoint + BatchRetrieveRecent []BatchRetrievePoint + HotPathBannedSkips int64 + HotPathBanIncrements int64 +} + +type HandleCounters struct { + Total int64 + Success int64 + Failure int64 + Timeout int64 +} + +type BanEntry struct { + ID string + IP string + Port uint32 + Count int32 + 
CreatedAtUnix int64 + AgeSeconds int64 +} + +type DatabaseStats struct { + P2PDBSizeMB float64 + P2PDBRecordsCount int64 +} + +type DiskStatus struct { + AllMB float64 + UsedMB float64 + FreeMB float64 } // TaskProvider interface defines the contract for services to provide