diff --git a/gen/proto/go/parca/debuginfo/v1alpha1/debuginfo.pb.go b/gen/proto/go/parca/debuginfo/v1alpha1/debuginfo.pb.go index 89e8eeba0f7..33e8b42b625 100644 --- a/gen/proto/go/parca/debuginfo/v1alpha1/debuginfo.pb.go +++ b/gen/proto/go/parca/debuginfo/v1alpha1/debuginfo.pb.go @@ -9,6 +9,7 @@ package debuginfov1alpha1 import ( protoreflect "google.golang.org/protobuf/reflect/protoreflect" protoimpl "google.golang.org/protobuf/runtime/protoimpl" + timestamppb "google.golang.org/protobuf/types/known/timestamppb" reflect "reflect" sync "sync" ) @@ -20,73 +21,177 @@ const ( _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) ) -// Source enum describes the source a debuginfo is from. -type DownloadInfo_Source int32 +// The strategy to use for uploading. +type InitiateUploadResponse_UploadStrategy int32 + +const ( + // The upload is not allowed. + InitiateUploadResponse_UPLOAD_STRATEGY_UNSPECIFIED InitiateUploadResponse_UploadStrategy = 0 + // The upload is allowed and should be done via the Upload RPC. + InitiateUploadResponse_UPLOAD_STRATEGY_GRPC InitiateUploadResponse_UploadStrategy = 1 + // The upload is allowed and should be done via a returned signed URL. + InitiateUploadResponse_UPLOAD_STRATEGY_SIGNED_URL InitiateUploadResponse_UploadStrategy = 2 +) + +// Enum value maps for InitiateUploadResponse_UploadStrategy. 
+var ( + InitiateUploadResponse_UploadStrategy_name = map[int32]string{ + 0: "UPLOAD_STRATEGY_UNSPECIFIED", + 1: "UPLOAD_STRATEGY_GRPC", + 2: "UPLOAD_STRATEGY_SIGNED_URL", + } + InitiateUploadResponse_UploadStrategy_value = map[string]int32{ + "UPLOAD_STRATEGY_UNSPECIFIED": 0, + "UPLOAD_STRATEGY_GRPC": 1, + "UPLOAD_STRATEGY_SIGNED_URL": 2, + } +) + +func (x InitiateUploadResponse_UploadStrategy) Enum() *InitiateUploadResponse_UploadStrategy { + p := new(InitiateUploadResponse_UploadStrategy) + *p = x + return p +} + +func (x InitiateUploadResponse_UploadStrategy) String() string { + return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x)) +} + +func (InitiateUploadResponse_UploadStrategy) Descriptor() protoreflect.EnumDescriptor { + return file_parca_debuginfo_v1alpha1_debuginfo_proto_enumTypes[0].Descriptor() +} + +func (InitiateUploadResponse_UploadStrategy) Type() protoreflect.EnumType { + return &file_parca_debuginfo_v1alpha1_debuginfo_proto_enumTypes[0] +} + +func (x InitiateUploadResponse_UploadStrategy) Number() protoreflect.EnumNumber { + return protoreflect.EnumNumber(x) +} + +// Deprecated: Use InitiateUploadResponse_UploadStrategy.Descriptor instead. +func (InitiateUploadResponse_UploadStrategy) EnumDescriptor() ([]byte, []int) { + return file_parca_debuginfo_v1alpha1_debuginfo_proto_rawDescGZIP(), []int{3, 0} +} + +// Source is the source of the debuginfo. +type Debuginfo_Source int32 const ( // To understand when no source is set we have the unknown source. - DownloadInfo_SOURCE_UNKNOWN_UNSPECIFIED DownloadInfo_Source = 0 + Debuginfo_SOURCE_UNKNOWN_UNSPECIFIED Debuginfo_Source = 0 // The debuginfo was uploaded by a user/agent. - DownloadInfo_SOURCE_UPLOAD DownloadInfo_Source = 1 - // The debuginfo was downloaded from a public debuginfod server. - DownloadInfo_SOURCE_DEBUGINFOD DownloadInfo_Source = 2 + Debuginfo_SOURCE_UPLOAD Debuginfo_Source = 1 + // The debuginfo is available from the configured debuginfod server(s). 
+ Debuginfo_SOURCE_DEBUGINFOD Debuginfo_Source = 2 ) -// Enum value maps for DownloadInfo_Source. +// Enum value maps for Debuginfo_Source. var ( - DownloadInfo_Source_name = map[int32]string{ + Debuginfo_Source_name = map[int32]string{ 0: "SOURCE_UNKNOWN_UNSPECIFIED", 1: "SOURCE_UPLOAD", 2: "SOURCE_DEBUGINFOD", } - DownloadInfo_Source_value = map[string]int32{ + Debuginfo_Source_value = map[string]int32{ "SOURCE_UNKNOWN_UNSPECIFIED": 0, "SOURCE_UPLOAD": 1, "SOURCE_DEBUGINFOD": 2, } ) -func (x DownloadInfo_Source) Enum() *DownloadInfo_Source { - p := new(DownloadInfo_Source) +func (x Debuginfo_Source) Enum() *Debuginfo_Source { + p := new(Debuginfo_Source) *p = x return p } -func (x DownloadInfo_Source) String() string { +func (x Debuginfo_Source) String() string { return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x)) } -func (DownloadInfo_Source) Descriptor() protoreflect.EnumDescriptor { - return file_parca_debuginfo_v1alpha1_debuginfo_proto_enumTypes[0].Descriptor() +func (Debuginfo_Source) Descriptor() protoreflect.EnumDescriptor { + return file_parca_debuginfo_v1alpha1_debuginfo_proto_enumTypes[1].Descriptor() } -func (DownloadInfo_Source) Type() protoreflect.EnumType { - return &file_parca_debuginfo_v1alpha1_debuginfo_proto_enumTypes[0] +func (Debuginfo_Source) Type() protoreflect.EnumType { + return &file_parca_debuginfo_v1alpha1_debuginfo_proto_enumTypes[1] } -func (x DownloadInfo_Source) Number() protoreflect.EnumNumber { +func (x Debuginfo_Source) Number() protoreflect.EnumNumber { return protoreflect.EnumNumber(x) } -// Deprecated: Use DownloadInfo_Source.Descriptor instead. -func (DownloadInfo_Source) EnumDescriptor() ([]byte, []int) { - return file_parca_debuginfo_v1alpha1_debuginfo_proto_rawDescGZIP(), []int{7, 0} +// Deprecated: Use Debuginfo_Source.Descriptor instead. 
+func (Debuginfo_Source) EnumDescriptor() ([]byte, []int) { + return file_parca_debuginfo_v1alpha1_debuginfo_proto_rawDescGZIP(), []int{9, 0} } -// ExistsRequest request to determine if debug info exists for a given build_id -type ExistsRequest struct { +// The state of the debuginfo upload. +type DebuginfoUpload_State int32 + +const ( + // To understand when no upload state is set we have the unknown state. + DebuginfoUpload_STATE_UNKNOWN_UNSPECIFIED DebuginfoUpload_State = 0 + // The debuginfo is currently being uploaded. + DebuginfoUpload_STATE_UPLOADING DebuginfoUpload_State = 1 + // The debuginfo has been uploaded successfully. + DebuginfoUpload_STATE_UPLOADED DebuginfoUpload_State = 2 +) + +// Enum value maps for DebuginfoUpload_State. +var ( + DebuginfoUpload_State_name = map[int32]string{ + 0: "STATE_UNKNOWN_UNSPECIFIED", + 1: "STATE_UPLOADING", + 2: "STATE_UPLOADED", + } + DebuginfoUpload_State_value = map[string]int32{ + "STATE_UNKNOWN_UNSPECIFIED": 0, + "STATE_UPLOADING": 1, + "STATE_UPLOADED": 2, + } +) + +func (x DebuginfoUpload_State) Enum() *DebuginfoUpload_State { + p := new(DebuginfoUpload_State) + *p = x + return p +} + +func (x DebuginfoUpload_State) String() string { + return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x)) +} + +func (DebuginfoUpload_State) Descriptor() protoreflect.EnumDescriptor { + return file_parca_debuginfo_v1alpha1_debuginfo_proto_enumTypes[2].Descriptor() +} + +func (DebuginfoUpload_State) Type() protoreflect.EnumType { + return &file_parca_debuginfo_v1alpha1_debuginfo_proto_enumTypes[2] +} + +func (x DebuginfoUpload_State) Number() protoreflect.EnumNumber { + return protoreflect.EnumNumber(x) +} + +// Deprecated: Use DebuginfoUpload_State.Descriptor instead. +func (DebuginfoUpload_State) EnumDescriptor() ([]byte, []int) { + return file_parca_debuginfo_v1alpha1_debuginfo_proto_rawDescGZIP(), []int{10, 0} +} + +// ShouldInitiateUploadRequest is the request for ShouldInitiateUpload. 
+type ShouldInitiateUploadRequest struct { state protoimpl.MessageState sizeCache protoimpl.SizeCache unknownFields protoimpl.UnknownFields - // build_id is a unique identifier for the debug data + // The build_id of the debuginfo. BuildId string `protobuf:"bytes,1,opt,name=build_id,json=buildId,proto3" json:"build_id,omitempty"` - // hash is the hash of the debug information file - Hash string `protobuf:"bytes,2,opt,name=hash,proto3" json:"hash,omitempty"` } -func (x *ExistsRequest) Reset() { - *x = ExistsRequest{} +func (x *ShouldInitiateUploadRequest) Reset() { + *x = ShouldInitiateUploadRequest{} if protoimpl.UnsafeEnabled { mi := &file_parca_debuginfo_v1alpha1_debuginfo_proto_msgTypes[0] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) @@ -94,13 +199,13 @@ func (x *ExistsRequest) Reset() { } } -func (x *ExistsRequest) String() string { +func (x *ShouldInitiateUploadRequest) String() string { return protoimpl.X.MessageStringOf(x) } -func (*ExistsRequest) ProtoMessage() {} +func (*ShouldInitiateUploadRequest) ProtoMessage() {} -func (x *ExistsRequest) ProtoReflect() protoreflect.Message { +func (x *ShouldInitiateUploadRequest) ProtoReflect() protoreflect.Message { mi := &file_parca_debuginfo_v1alpha1_debuginfo_proto_msgTypes[0] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) @@ -112,52 +217,157 @@ func (x *ExistsRequest) ProtoReflect() protoreflect.Message { return mi.MessageOf(x) } -// Deprecated: Use ExistsRequest.ProtoReflect.Descriptor instead. -func (*ExistsRequest) Descriptor() ([]byte, []int) { +// Deprecated: Use ShouldInitiateUploadRequest.ProtoReflect.Descriptor instead. 
+func (*ShouldInitiateUploadRequest) Descriptor() ([]byte, []int) { return file_parca_debuginfo_v1alpha1_debuginfo_proto_rawDescGZIP(), []int{0} } -func (x *ExistsRequest) GetBuildId() string { +func (x *ShouldInitiateUploadRequest) GetBuildId() string { if x != nil { return x.BuildId } return "" } -func (x *ExistsRequest) GetHash() string { +// ShouldInitiateUploadResponse is the response for ShouldInitiateUpload. +type ShouldInitiateUploadResponse struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Whether an upload should be initiated or not. + ShouldInitiateUpload bool `protobuf:"varint,1,opt,name=should_initiate_upload,json=shouldInitiateUpload,proto3" json:"should_initiate_upload,omitempty"` +} + +func (x *ShouldInitiateUploadResponse) Reset() { + *x = ShouldInitiateUploadResponse{} + if protoimpl.UnsafeEnabled { + mi := &file_parca_debuginfo_v1alpha1_debuginfo_proto_msgTypes[1] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *ShouldInitiateUploadResponse) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*ShouldInitiateUploadResponse) ProtoMessage() {} + +func (x *ShouldInitiateUploadResponse) ProtoReflect() protoreflect.Message { + mi := &file_parca_debuginfo_v1alpha1_debuginfo_proto_msgTypes[1] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use ShouldInitiateUploadResponse.ProtoReflect.Descriptor instead. 
+func (*ShouldInitiateUploadResponse) Descriptor() ([]byte, []int) { + return file_parca_debuginfo_v1alpha1_debuginfo_proto_rawDescGZIP(), []int{1} +} + +func (x *ShouldInitiateUploadResponse) GetShouldInitiateUpload() bool { if x != nil { - return x.Hash + return x.ShouldInitiateUpload + } + return false +} + +// InitiateUploadRequest is the request to initiate an upload. +type InitiateUploadRequest struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // The build_id of the debug info to upload. + BuildId string `protobuf:"bytes,1,opt,name=build_id,json=buildId,proto3" json:"build_id,omitempty"` + // The size of the debug info to upload. + Size int64 `protobuf:"varint,2,opt,name=size,proto3" json:"size,omitempty"` +} + +func (x *InitiateUploadRequest) Reset() { + *x = InitiateUploadRequest{} + if protoimpl.UnsafeEnabled { + mi := &file_parca_debuginfo_v1alpha1_debuginfo_proto_msgTypes[2] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *InitiateUploadRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*InitiateUploadRequest) ProtoMessage() {} + +func (x *InitiateUploadRequest) ProtoReflect() protoreflect.Message { + mi := &file_parca_debuginfo_v1alpha1_debuginfo_proto_msgTypes[2] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use InitiateUploadRequest.ProtoReflect.Descriptor instead. 
+func (*InitiateUploadRequest) Descriptor() ([]byte, []int) { + return file_parca_debuginfo_v1alpha1_debuginfo_proto_rawDescGZIP(), []int{2} +} + +func (x *InitiateUploadRequest) GetBuildId() string { + if x != nil { + return x.BuildId } return "" } -// ExistsResponse returns whether the given build_id has debug info -type ExistsResponse struct { +func (x *InitiateUploadRequest) GetSize() int64 { + if x != nil { + return x.Size + } + return 0 +} + +// InitiateUploadResponse is the response to an InitiateUploadRequest. +type InitiateUploadResponse struct { state protoimpl.MessageState sizeCache protoimpl.SizeCache unknownFields protoimpl.UnknownFields - // exists indicates if there is debug data present for the given build_id - Exists bool `protobuf:"varint,1,opt,name=exists,proto3" json:"exists,omitempty"` + // The upload_id to use for uploading. + UploadId string `protobuf:"bytes,1,opt,name=upload_id,json=uploadId,proto3" json:"upload_id,omitempty"` + // The strategy to use for uploading. + UploadStrategy InitiateUploadResponse_UploadStrategy `protobuf:"varint,2,opt,name=upload_strategy,json=uploadStrategy,proto3,enum=parca.debuginfo.v1alpha1.InitiateUploadResponse_UploadStrategy" json:"upload_strategy,omitempty"` + // The signed url to use for uploading using a PUT request when the upload + // strategy is SIGNED_STRATEGY_URL. 
+ SignedUrl string `protobuf:"bytes,3,opt,name=signed_url,json=signedUrl,proto3" json:"signed_url,omitempty"` } -func (x *ExistsResponse) Reset() { - *x = ExistsResponse{} +func (x *InitiateUploadResponse) Reset() { + *x = InitiateUploadResponse{} if protoimpl.UnsafeEnabled { - mi := &file_parca_debuginfo_v1alpha1_debuginfo_proto_msgTypes[1] + mi := &file_parca_debuginfo_v1alpha1_debuginfo_proto_msgTypes[3] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } } -func (x *ExistsResponse) String() string { +func (x *InitiateUploadResponse) String() string { return protoimpl.X.MessageStringOf(x) } -func (*ExistsResponse) ProtoMessage() {} +func (*InitiateUploadResponse) ProtoMessage() {} -func (x *ExistsResponse) ProtoReflect() protoreflect.Message { - mi := &file_parca_debuginfo_v1alpha1_debuginfo_proto_msgTypes[1] +func (x *InitiateUploadResponse) ProtoReflect() protoreflect.Message { + mi := &file_parca_debuginfo_v1alpha1_debuginfo_proto_msgTypes[3] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -168,16 +378,127 @@ func (x *ExistsResponse) ProtoReflect() protoreflect.Message { return mi.MessageOf(x) } -// Deprecated: Use ExistsResponse.ProtoReflect.Descriptor instead. -func (*ExistsResponse) Descriptor() ([]byte, []int) { - return file_parca_debuginfo_v1alpha1_debuginfo_proto_rawDescGZIP(), []int{1} +// Deprecated: Use InitiateUploadResponse.ProtoReflect.Descriptor instead. 
+func (*InitiateUploadResponse) Descriptor() ([]byte, []int) { + return file_parca_debuginfo_v1alpha1_debuginfo_proto_rawDescGZIP(), []int{3} } -func (x *ExistsResponse) GetExists() bool { +func (x *InitiateUploadResponse) GetUploadId() string { if x != nil { - return x.Exists + return x.UploadId } - return false + return "" +} + +func (x *InitiateUploadResponse) GetUploadStrategy() InitiateUploadResponse_UploadStrategy { + if x != nil { + return x.UploadStrategy + } + return InitiateUploadResponse_UPLOAD_STRATEGY_UNSPECIFIED +} + +func (x *InitiateUploadResponse) GetSignedUrl() string { + if x != nil { + return x.SignedUrl + } + return "" +} + +// MarkUploadFinishedRequest is the request to mark an upload as finished. +type MarkUploadFinishedRequest struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // The build_id of the debug info to mark as finished. + BuildId string `protobuf:"bytes,1,opt,name=build_id,json=buildId,proto3" json:"build_id,omitempty"` + // The upload_id of the debug info to mark as finished. 
+ UploadId string `protobuf:"bytes,2,opt,name=upload_id,json=uploadId,proto3" json:"upload_id,omitempty"` +} + +func (x *MarkUploadFinishedRequest) Reset() { + *x = MarkUploadFinishedRequest{} + if protoimpl.UnsafeEnabled { + mi := &file_parca_debuginfo_v1alpha1_debuginfo_proto_msgTypes[4] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *MarkUploadFinishedRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*MarkUploadFinishedRequest) ProtoMessage() {} + +func (x *MarkUploadFinishedRequest) ProtoReflect() protoreflect.Message { + mi := &file_parca_debuginfo_v1alpha1_debuginfo_proto_msgTypes[4] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use MarkUploadFinishedRequest.ProtoReflect.Descriptor instead. +func (*MarkUploadFinishedRequest) Descriptor() ([]byte, []int) { + return file_parca_debuginfo_v1alpha1_debuginfo_proto_rawDescGZIP(), []int{4} +} + +func (x *MarkUploadFinishedRequest) GetBuildId() string { + if x != nil { + return x.BuildId + } + return "" +} + +func (x *MarkUploadFinishedRequest) GetUploadId() string { + if x != nil { + return x.UploadId + } + return "" +} + +// MarkUploadFinishedResponse is the response to a MarkUploadFinishedRequest. 
+type MarkUploadFinishedResponse struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields +} + +func (x *MarkUploadFinishedResponse) Reset() { + *x = MarkUploadFinishedResponse{} + if protoimpl.UnsafeEnabled { + mi := &file_parca_debuginfo_v1alpha1_debuginfo_proto_msgTypes[5] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *MarkUploadFinishedResponse) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*MarkUploadFinishedResponse) ProtoMessage() {} + +func (x *MarkUploadFinishedResponse) ProtoReflect() protoreflect.Message { + mi := &file_parca_debuginfo_v1alpha1_debuginfo_proto_msgTypes[5] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use MarkUploadFinishedResponse.ProtoReflect.Descriptor instead. 
+func (*MarkUploadFinishedResponse) Descriptor() ([]byte, []int) { + return file_parca_debuginfo_v1alpha1_debuginfo_proto_rawDescGZIP(), []int{5} } // UploadRequest upload debug info @@ -197,7 +518,7 @@ type UploadRequest struct { func (x *UploadRequest) Reset() { *x = UploadRequest{} if protoimpl.UnsafeEnabled { - mi := &file_parca_debuginfo_v1alpha1_debuginfo_proto_msgTypes[2] + mi := &file_parca_debuginfo_v1alpha1_debuginfo_proto_msgTypes[6] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -210,7 +531,7 @@ func (x *UploadRequest) String() string { func (*UploadRequest) ProtoMessage() {} func (x *UploadRequest) ProtoReflect() protoreflect.Message { - mi := &file_parca_debuginfo_v1alpha1_debuginfo_proto_msgTypes[2] + mi := &file_parca_debuginfo_v1alpha1_debuginfo_proto_msgTypes[6] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -223,7 +544,7 @@ func (x *UploadRequest) ProtoReflect() protoreflect.Message { // Deprecated: Use UploadRequest.ProtoReflect.Descriptor instead. 
func (*UploadRequest) Descriptor() ([]byte, []int) { - return file_parca_debuginfo_v1alpha1_debuginfo_proto_rawDescGZIP(), []int{2} + return file_parca_debuginfo_v1alpha1_debuginfo_proto_rawDescGZIP(), []int{6} } func (m *UploadRequest) GetData() isUploadRequest_Data { @@ -273,14 +594,14 @@ type UploadInfo struct { // build_id is a unique identifier for the debug data BuildId string `protobuf:"bytes,1,opt,name=build_id,json=buildId,proto3" json:"build_id,omitempty"` - // hash is the hash of the source file that debug information extracted from - Hash string `protobuf:"bytes,2,opt,name=hash,proto3" json:"hash,omitempty"` + // upload_id is a unique identifier for the upload + UploadId string `protobuf:"bytes,2,opt,name=upload_id,json=uploadId,proto3" json:"upload_id,omitempty"` } func (x *UploadInfo) Reset() { *x = UploadInfo{} if protoimpl.UnsafeEnabled { - mi := &file_parca_debuginfo_v1alpha1_debuginfo_proto_msgTypes[3] + mi := &file_parca_debuginfo_v1alpha1_debuginfo_proto_msgTypes[7] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -293,7 +614,7 @@ func (x *UploadInfo) String() string { func (*UploadInfo) ProtoMessage() {} func (x *UploadInfo) ProtoReflect() protoreflect.Message { - mi := &file_parca_debuginfo_v1alpha1_debuginfo_proto_msgTypes[3] + mi := &file_parca_debuginfo_v1alpha1_debuginfo_proto_msgTypes[7] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -306,7 +627,7 @@ func (x *UploadInfo) ProtoReflect() protoreflect.Message { // Deprecated: Use UploadInfo.ProtoReflect.Descriptor instead. 
func (*UploadInfo) Descriptor() ([]byte, []int) { - return file_parca_debuginfo_v1alpha1_debuginfo_proto_rawDescGZIP(), []int{3} + return file_parca_debuginfo_v1alpha1_debuginfo_proto_rawDescGZIP(), []int{7} } func (x *UploadInfo) GetBuildId() string { @@ -316,9 +637,9 @@ func (x *UploadInfo) GetBuildId() string { return "" } -func (x *UploadInfo) GetHash() string { +func (x *UploadInfo) GetUploadId() string { if x != nil { - return x.Hash + return x.UploadId } return "" } @@ -338,7 +659,7 @@ type UploadResponse struct { func (x *UploadResponse) Reset() { *x = UploadResponse{} if protoimpl.UnsafeEnabled { - mi := &file_parca_debuginfo_v1alpha1_debuginfo_proto_msgTypes[4] + mi := &file_parca_debuginfo_v1alpha1_debuginfo_proto_msgTypes[8] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -351,7 +672,7 @@ func (x *UploadResponse) String() string { func (*UploadResponse) ProtoMessage() {} func (x *UploadResponse) ProtoReflect() protoreflect.Message { - mi := &file_parca_debuginfo_v1alpha1_debuginfo_proto_msgTypes[4] + mi := &file_parca_debuginfo_v1alpha1_debuginfo_proto_msgTypes[8] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -364,7 +685,7 @@ func (x *UploadResponse) ProtoReflect() protoreflect.Message { // Deprecated: Use UploadResponse.ProtoReflect.Descriptor instead. func (*UploadResponse) Descriptor() ([]byte, []int) { - return file_parca_debuginfo_v1alpha1_debuginfo_proto_rawDescGZIP(), []int{4} + return file_parca_debuginfo_v1alpha1_debuginfo_proto_rawDescGZIP(), []int{8} } func (x *UploadResponse) GetBuildId() string { @@ -381,33 +702,40 @@ func (x *UploadResponse) GetSize() uint64 { return 0 } -// DownloadRequest upload debug info -type DownloadRequest struct { +// Debuginfo contains metadata about a debuginfo file. 
+type Debuginfo struct { state protoimpl.MessageState sizeCache protoimpl.SizeCache unknownFields protoimpl.UnknownFields - // build_id is a unique identifier for the debug data + // BuildID is the build ID of the debuginfo. BuildId string `protobuf:"bytes,1,opt,name=build_id,json=buildId,proto3" json:"build_id,omitempty"` + // Source is the source of the debuginfo. + Source Debuginfo_Source `protobuf:"varint,2,opt,name=source,proto3,enum=parca.debuginfo.v1alpha1.Debuginfo_Source" json:"source,omitempty"` + // DebuginfoUpload is the debuginfo upload metadata. + Upload *DebuginfoUpload `protobuf:"bytes,3,opt,name=upload,proto3" json:"upload,omitempty"` + // Quality is the quality of the debuginfo. This is set asynchonously by the + // symbolizer when the debuginfo is actually used. + Quality *DebuginfoQuality `protobuf:"bytes,4,opt,name=quality,proto3" json:"quality,omitempty"` } -func (x *DownloadRequest) Reset() { - *x = DownloadRequest{} +func (x *Debuginfo) Reset() { + *x = Debuginfo{} if protoimpl.UnsafeEnabled { - mi := &file_parca_debuginfo_v1alpha1_debuginfo_proto_msgTypes[5] + mi := &file_parca_debuginfo_v1alpha1_debuginfo_proto_msgTypes[9] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } } -func (x *DownloadRequest) String() string { +func (x *Debuginfo) String() string { return protoimpl.X.MessageStringOf(x) } -func (*DownloadRequest) ProtoMessage() {} +func (*Debuginfo) ProtoMessage() {} -func (x *DownloadRequest) ProtoReflect() protoreflect.Message { - mi := &file_parca_debuginfo_v1alpha1_debuginfo_proto_msgTypes[5] +func (x *Debuginfo) ProtoReflect() protoreflect.Message { + mi := &file_parca_debuginfo_v1alpha1_debuginfo_proto_msgTypes[9] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -418,49 +746,72 @@ func (x *DownloadRequest) ProtoReflect() protoreflect.Message { return mi.MessageOf(x) } -// Deprecated: Use 
DownloadRequest.ProtoReflect.Descriptor instead. -func (*DownloadRequest) Descriptor() ([]byte, []int) { - return file_parca_debuginfo_v1alpha1_debuginfo_proto_rawDescGZIP(), []int{5} +// Deprecated: Use Debuginfo.ProtoReflect.Descriptor instead. +func (*Debuginfo) Descriptor() ([]byte, []int) { + return file_parca_debuginfo_v1alpha1_debuginfo_proto_rawDescGZIP(), []int{9} } -func (x *DownloadRequest) GetBuildId() string { +func (x *Debuginfo) GetBuildId() string { if x != nil { return x.BuildId } return "" } -// DownloadRequest returns chunked data of the debuginfo. -type DownloadResponse struct { +func (x *Debuginfo) GetSource() Debuginfo_Source { + if x != nil { + return x.Source + } + return Debuginfo_SOURCE_UNKNOWN_UNSPECIFIED +} + +func (x *Debuginfo) GetUpload() *DebuginfoUpload { + if x != nil { + return x.Upload + } + return nil +} + +func (x *Debuginfo) GetQuality() *DebuginfoQuality { + if x != nil { + return x.Quality + } + return nil +} + +// DebuginfoUpload contains metadata about a debuginfo upload. +type DebuginfoUpload struct { state protoimpl.MessageState sizeCache protoimpl.SizeCache unknownFields protoimpl.UnknownFields - // data contains either the upload info metadata or the debug info - // - // Types that are assignable to Data: - // *DownloadResponse_Info - // *DownloadResponse_ChunkData - Data isDownloadResponse_Data `protobuf_oneof:"data"` + // UploadID is the ID of the debuginfo upload. + Id string `protobuf:"bytes,1,opt,name=id,proto3" json:"id,omitempty"` + // State is the current state of the debuginfo upload. + State DebuginfoUpload_State `protobuf:"varint,2,opt,name=state,proto3,enum=parca.debuginfo.v1alpha1.DebuginfoUpload_State" json:"state,omitempty"` + // StartedAt is the time the debuginfo upload was started. + StartedAt *timestamppb.Timestamp `protobuf:"bytes,4,opt,name=started_at,json=startedAt,proto3" json:"started_at,omitempty"` + // FinishedAt is the time the debuginfo upload was finished. 
+ FinishedAt *timestamppb.Timestamp `protobuf:"bytes,5,opt,name=finished_at,json=finishedAt,proto3" json:"finished_at,omitempty"` } -func (x *DownloadResponse) Reset() { - *x = DownloadResponse{} +func (x *DebuginfoUpload) Reset() { + *x = DebuginfoUpload{} if protoimpl.UnsafeEnabled { - mi := &file_parca_debuginfo_v1alpha1_debuginfo_proto_msgTypes[6] + mi := &file_parca_debuginfo_v1alpha1_debuginfo_proto_msgTypes[10] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } } -func (x *DownloadResponse) String() string { +func (x *DebuginfoUpload) String() string { return protoimpl.X.MessageStringOf(x) } -func (*DownloadResponse) ProtoMessage() {} +func (*DebuginfoUpload) ProtoMessage() {} -func (x *DownloadResponse) ProtoReflect() protoreflect.Message { - mi := &file_parca_debuginfo_v1alpha1_debuginfo_proto_msgTypes[6] +func (x *DebuginfoUpload) ProtoReflect() protoreflect.Message { + mi := &file_parca_debuginfo_v1alpha1_debuginfo_proto_msgTypes[10] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -471,77 +822,66 @@ func (x *DownloadResponse) ProtoReflect() protoreflect.Message { return mi.MessageOf(x) } -// Deprecated: Use DownloadResponse.ProtoReflect.Descriptor instead. -func (*DownloadResponse) Descriptor() ([]byte, []int) { - return file_parca_debuginfo_v1alpha1_debuginfo_proto_rawDescGZIP(), []int{6} +// Deprecated: Use DebuginfoUpload.ProtoReflect.Descriptor instead. 
+func (*DebuginfoUpload) Descriptor() ([]byte, []int) { + return file_parca_debuginfo_v1alpha1_debuginfo_proto_rawDescGZIP(), []int{10} } -func (m *DownloadResponse) GetData() isDownloadResponse_Data { - if m != nil { - return m.Data +func (x *DebuginfoUpload) GetId() string { + if x != nil { + return x.Id } - return nil + return "" } -func (x *DownloadResponse) GetInfo() *DownloadInfo { - if x, ok := x.GetData().(*DownloadResponse_Info); ok { - return x.Info +func (x *DebuginfoUpload) GetState() DebuginfoUpload_State { + if x != nil { + return x.State } - return nil + return DebuginfoUpload_STATE_UNKNOWN_UNSPECIFIED } -func (x *DownloadResponse) GetChunkData() []byte { - if x, ok := x.GetData().(*DownloadResponse_ChunkData); ok { - return x.ChunkData +func (x *DebuginfoUpload) GetStartedAt() *timestamppb.Timestamp { + if x != nil { + return x.StartedAt } return nil } -type isDownloadResponse_Data interface { - isDownloadResponse_Data() -} - -type DownloadResponse_Info struct { - // info is the metadata for the debug info - Info *DownloadInfo `protobuf:"bytes,1,opt,name=info,proto3,oneof"` -} - -type DownloadResponse_ChunkData struct { - // chunk_data is the raw bytes of the debug info - ChunkData []byte `protobuf:"bytes,2,opt,name=chunk_data,json=chunkData,proto3,oneof"` +func (x *DebuginfoUpload) GetFinishedAt() *timestamppb.Timestamp { + if x != nil { + return x.FinishedAt + } + return nil } -func (*DownloadResponse_Info) isDownloadResponse_Data() {} - -func (*DownloadResponse_ChunkData) isDownloadResponse_Data() {} - -// DownloadInfo metadata for the debug data that is being downloaded. -type DownloadInfo struct { +// DebuginfoQuality is the quality of the debuginfo. +type DebuginfoQuality struct { state protoimpl.MessageState sizeCache protoimpl.SizeCache unknownFields protoimpl.UnknownFields - // Source indicates the origin of the debuginfo being downloaded. 
- Source DownloadInfo_Source `protobuf:"varint,1,opt,name=source,proto3,enum=parca.debuginfo.v1alpha1.DownloadInfo_Source" json:"source,omitempty"` + // The debuginfo file is not a valid ELF file. + NotValidElf bool `protobuf:"varint,1,opt,name=not_valid_elf,json=notValidElf,proto3" json:"not_valid_elf,omitempty"` } -func (x *DownloadInfo) Reset() { - *x = DownloadInfo{} +func (x *DebuginfoQuality) Reset() { + *x = DebuginfoQuality{} if protoimpl.UnsafeEnabled { - mi := &file_parca_debuginfo_v1alpha1_debuginfo_proto_msgTypes[7] + mi := &file_parca_debuginfo_v1alpha1_debuginfo_proto_msgTypes[11] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } } -func (x *DownloadInfo) String() string { +func (x *DebuginfoQuality) String() string { return protoimpl.X.MessageStringOf(x) } -func (*DownloadInfo) ProtoMessage() {} +func (*DebuginfoQuality) ProtoMessage() {} -func (x *DownloadInfo) ProtoReflect() protoreflect.Message { - mi := &file_parca_debuginfo_v1alpha1_debuginfo_proto_msgTypes[7] +func (x *DebuginfoQuality) ProtoReflect() protoreflect.Message { + mi := &file_parca_debuginfo_v1alpha1_debuginfo_proto_msgTypes[11] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -552,16 +892,16 @@ func (x *DownloadInfo) ProtoReflect() protoreflect.Message { return mi.MessageOf(x) } -// Deprecated: Use DownloadInfo.ProtoReflect.Descriptor instead. -func (*DownloadInfo) Descriptor() ([]byte, []int) { - return file_parca_debuginfo_v1alpha1_debuginfo_proto_rawDescGZIP(), []int{7} +// Deprecated: Use DebuginfoQuality.ProtoReflect.Descriptor instead. 
+func (*DebuginfoQuality) Descriptor() ([]byte, []int) { + return file_parca_debuginfo_v1alpha1_debuginfo_proto_rawDescGZIP(), []int{11} } -func (x *DownloadInfo) GetSource() DownloadInfo_Source { +func (x *DebuginfoQuality) GetNotValidElf() bool { if x != nil { - return x.Source + return x.NotValidElf } - return DownloadInfo_SOURCE_UNKNOWN_UNSPECIFIED + return false } var File_parca_debuginfo_v1alpha1_debuginfo_proto protoreflect.FileDescriptor @@ -571,13 +911,48 @@ var file_parca_debuginfo_v1alpha1_debuginfo_proto_rawDesc = []byte{ 0x6f, 0x2f, 0x76, 0x31, 0x61, 0x6c, 0x70, 0x68, 0x61, 0x31, 0x2f, 0x64, 0x65, 0x62, 0x75, 0x67, 0x69, 0x6e, 0x66, 0x6f, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x18, 0x70, 0x61, 0x72, 0x63, 0x61, 0x2e, 0x64, 0x65, 0x62, 0x75, 0x67, 0x69, 0x6e, 0x66, 0x6f, 0x2e, 0x76, 0x31, 0x61, 0x6c, - 0x70, 0x68, 0x61, 0x31, 0x22, 0x3e, 0x0a, 0x0d, 0x45, 0x78, 0x69, 0x73, 0x74, 0x73, 0x52, 0x65, - 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x19, 0x0a, 0x08, 0x62, 0x75, 0x69, 0x6c, 0x64, 0x5f, 0x69, - 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, 0x62, 0x75, 0x69, 0x6c, 0x64, 0x49, 0x64, - 0x12, 0x12, 0x0a, 0x04, 0x68, 0x61, 0x73, 0x68, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, - 0x68, 0x61, 0x73, 0x68, 0x22, 0x28, 0x0a, 0x0e, 0x45, 0x78, 0x69, 0x73, 0x74, 0x73, 0x52, 0x65, - 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x16, 0x0a, 0x06, 0x65, 0x78, 0x69, 0x73, 0x74, 0x73, - 0x18, 0x01, 0x20, 0x01, 0x28, 0x08, 0x52, 0x06, 0x65, 0x78, 0x69, 0x73, 0x74, 0x73, 0x22, 0x74, + 0x70, 0x68, 0x61, 0x31, 0x1a, 0x1f, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, + 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x74, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x2e, + 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x22, 0x38, 0x0a, 0x1b, 0x53, 0x68, 0x6f, 0x75, 0x6c, 0x64, 0x49, + 0x6e, 0x69, 0x74, 0x69, 0x61, 0x74, 0x65, 0x55, 0x70, 0x6c, 0x6f, 0x61, 0x64, 0x52, 0x65, 0x71, + 0x75, 0x65, 0x73, 0x74, 0x12, 0x19, 0x0a, 0x08, 0x62, 0x75, 0x69, 0x6c, 0x64, 0x5f, 0x69, 
0x64, + 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, 0x62, 0x75, 0x69, 0x6c, 0x64, 0x49, 0x64, 0x22, + 0x54, 0x0a, 0x1c, 0x53, 0x68, 0x6f, 0x75, 0x6c, 0x64, 0x49, 0x6e, 0x69, 0x74, 0x69, 0x61, 0x74, + 0x65, 0x55, 0x70, 0x6c, 0x6f, 0x61, 0x64, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, + 0x34, 0x0a, 0x16, 0x73, 0x68, 0x6f, 0x75, 0x6c, 0x64, 0x5f, 0x69, 0x6e, 0x69, 0x74, 0x69, 0x61, + 0x74, 0x65, 0x5f, 0x75, 0x70, 0x6c, 0x6f, 0x61, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x08, 0x52, + 0x14, 0x73, 0x68, 0x6f, 0x75, 0x6c, 0x64, 0x49, 0x6e, 0x69, 0x74, 0x69, 0x61, 0x74, 0x65, 0x55, + 0x70, 0x6c, 0x6f, 0x61, 0x64, 0x22, 0x46, 0x0a, 0x15, 0x49, 0x6e, 0x69, 0x74, 0x69, 0x61, 0x74, + 0x65, 0x55, 0x70, 0x6c, 0x6f, 0x61, 0x64, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x19, + 0x0a, 0x08, 0x62, 0x75, 0x69, 0x6c, 0x64, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, + 0x52, 0x07, 0x62, 0x75, 0x69, 0x6c, 0x64, 0x49, 0x64, 0x12, 0x12, 0x0a, 0x04, 0x73, 0x69, 0x7a, + 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x03, 0x52, 0x04, 0x73, 0x69, 0x7a, 0x65, 0x22, 0xab, 0x02, + 0x0a, 0x16, 0x49, 0x6e, 0x69, 0x74, 0x69, 0x61, 0x74, 0x65, 0x55, 0x70, 0x6c, 0x6f, 0x61, 0x64, + 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x1b, 0x0a, 0x09, 0x75, 0x70, 0x6c, 0x6f, + 0x61, 0x64, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x75, 0x70, 0x6c, + 0x6f, 0x61, 0x64, 0x49, 0x64, 0x12, 0x68, 0x0a, 0x0f, 0x75, 0x70, 0x6c, 0x6f, 0x61, 0x64, 0x5f, + 0x73, 0x74, 0x72, 0x61, 0x74, 0x65, 0x67, 0x79, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x3f, + 0x2e, 0x70, 0x61, 0x72, 0x63, 0x61, 0x2e, 0x64, 0x65, 0x62, 0x75, 0x67, 0x69, 0x6e, 0x66, 0x6f, + 0x2e, 0x76, 0x31, 0x61, 0x6c, 0x70, 0x68, 0x61, 0x31, 0x2e, 0x49, 0x6e, 0x69, 0x74, 0x69, 0x61, + 0x74, 0x65, 0x55, 0x70, 0x6c, 0x6f, 0x61, 0x64, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, + 0x2e, 0x55, 0x70, 0x6c, 0x6f, 0x61, 0x64, 0x53, 0x74, 0x72, 0x61, 0x74, 0x65, 0x67, 0x79, 0x52, + 0x0e, 0x75, 0x70, 0x6c, 0x6f, 
0x61, 0x64, 0x53, 0x74, 0x72, 0x61, 0x74, 0x65, 0x67, 0x79, 0x12, + 0x1d, 0x0a, 0x0a, 0x73, 0x69, 0x67, 0x6e, 0x65, 0x64, 0x5f, 0x75, 0x72, 0x6c, 0x18, 0x03, 0x20, + 0x01, 0x28, 0x09, 0x52, 0x09, 0x73, 0x69, 0x67, 0x6e, 0x65, 0x64, 0x55, 0x72, 0x6c, 0x22, 0x6b, + 0x0a, 0x0e, 0x55, 0x70, 0x6c, 0x6f, 0x61, 0x64, 0x53, 0x74, 0x72, 0x61, 0x74, 0x65, 0x67, 0x79, + 0x12, 0x1f, 0x0a, 0x1b, 0x55, 0x50, 0x4c, 0x4f, 0x41, 0x44, 0x5f, 0x53, 0x54, 0x52, 0x41, 0x54, + 0x45, 0x47, 0x59, 0x5f, 0x55, 0x4e, 0x53, 0x50, 0x45, 0x43, 0x49, 0x46, 0x49, 0x45, 0x44, 0x10, + 0x00, 0x12, 0x18, 0x0a, 0x14, 0x55, 0x50, 0x4c, 0x4f, 0x41, 0x44, 0x5f, 0x53, 0x54, 0x52, 0x41, + 0x54, 0x45, 0x47, 0x59, 0x5f, 0x47, 0x52, 0x50, 0x43, 0x10, 0x01, 0x12, 0x1e, 0x0a, 0x1a, 0x55, + 0x50, 0x4c, 0x4f, 0x41, 0x44, 0x5f, 0x53, 0x54, 0x52, 0x41, 0x54, 0x45, 0x47, 0x59, 0x5f, 0x53, + 0x49, 0x47, 0x4e, 0x45, 0x44, 0x5f, 0x55, 0x52, 0x4c, 0x10, 0x02, 0x22, 0x53, 0x0a, 0x19, 0x4d, + 0x61, 0x72, 0x6b, 0x55, 0x70, 0x6c, 0x6f, 0x61, 0x64, 0x46, 0x69, 0x6e, 0x69, 0x73, 0x68, 0x65, + 0x64, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x19, 0x0a, 0x08, 0x62, 0x75, 0x69, 0x6c, + 0x64, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, 0x62, 0x75, 0x69, 0x6c, + 0x64, 0x49, 0x64, 0x12, 0x1b, 0x0a, 0x09, 0x75, 0x70, 0x6c, 0x6f, 0x61, 0x64, 0x5f, 0x69, 0x64, + 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x75, 0x70, 0x6c, 0x6f, 0x61, 0x64, 0x49, 0x64, + 0x22, 0x1c, 0x0a, 0x1a, 0x4d, 0x61, 0x72, 0x6b, 0x55, 0x70, 0x6c, 0x6f, 0x61, 0x64, 0x46, 0x69, + 0x6e, 0x69, 0x73, 0x68, 0x65, 0x64, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x74, 0x0a, 0x0d, 0x55, 0x70, 0x6c, 0x6f, 0x61, 0x64, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x3a, 0x0a, 0x04, 0x69, 0x6e, 0x66, 0x6f, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x24, 0x2e, 0x70, 0x61, 0x72, 0x63, 0x61, 0x2e, 0x64, 0x65, 0x62, 0x75, 0x67, 0x69, 0x6e, 0x66, 0x6f, 0x2e, @@ -585,73 +960,107 @@ var 
file_parca_debuginfo_v1alpha1_debuginfo_proto_rawDesc = []byte{ 0x6e, 0x66, 0x6f, 0x48, 0x00, 0x52, 0x04, 0x69, 0x6e, 0x66, 0x6f, 0x12, 0x1f, 0x0a, 0x0a, 0x63, 0x68, 0x75, 0x6e, 0x6b, 0x5f, 0x64, 0x61, 0x74, 0x61, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0c, 0x48, 0x00, 0x52, 0x09, 0x63, 0x68, 0x75, 0x6e, 0x6b, 0x44, 0x61, 0x74, 0x61, 0x42, 0x06, 0x0a, 0x04, - 0x64, 0x61, 0x74, 0x61, 0x22, 0x3b, 0x0a, 0x0a, 0x55, 0x70, 0x6c, 0x6f, 0x61, 0x64, 0x49, 0x6e, + 0x64, 0x61, 0x74, 0x61, 0x22, 0x44, 0x0a, 0x0a, 0x55, 0x70, 0x6c, 0x6f, 0x61, 0x64, 0x49, 0x6e, 0x66, 0x6f, 0x12, 0x19, 0x0a, 0x08, 0x62, 0x75, 0x69, 0x6c, 0x64, 0x5f, 0x69, 0x64, 0x18, 0x01, - 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, 0x62, 0x75, 0x69, 0x6c, 0x64, 0x49, 0x64, 0x12, 0x12, 0x0a, - 0x04, 0x68, 0x61, 0x73, 0x68, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x68, 0x61, 0x73, - 0x68, 0x22, 0x3f, 0x0a, 0x0e, 0x55, 0x70, 0x6c, 0x6f, 0x61, 0x64, 0x52, 0x65, 0x73, 0x70, 0x6f, - 0x6e, 0x73, 0x65, 0x12, 0x19, 0x0a, 0x08, 0x62, 0x75, 0x69, 0x6c, 0x64, 0x5f, 0x69, 0x64, 0x18, - 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, 0x62, 0x75, 0x69, 0x6c, 0x64, 0x49, 0x64, 0x12, 0x12, - 0x0a, 0x04, 0x73, 0x69, 0x7a, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x04, 0x52, 0x04, 0x73, 0x69, - 0x7a, 0x65, 0x22, 0x2c, 0x0a, 0x0f, 0x44, 0x6f, 0x77, 0x6e, 0x6c, 0x6f, 0x61, 0x64, 0x52, 0x65, - 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x19, 0x0a, 0x08, 0x62, 0x75, 0x69, 0x6c, 0x64, 0x5f, 0x69, - 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, 0x62, 0x75, 0x69, 0x6c, 0x64, 0x49, 0x64, - 0x22, 0x79, 0x0a, 0x10, 0x44, 0x6f, 0x77, 0x6e, 0x6c, 0x6f, 0x61, 0x64, 0x52, 0x65, 0x73, 0x70, - 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x3c, 0x0a, 0x04, 0x69, 0x6e, 0x66, 0x6f, 0x18, 0x01, 0x20, 0x01, - 0x28, 0x0b, 0x32, 0x26, 0x2e, 0x70, 0x61, 0x72, 0x63, 0x61, 0x2e, 0x64, 0x65, 0x62, 0x75, 0x67, - 0x69, 0x6e, 0x66, 0x6f, 0x2e, 0x76, 0x31, 0x61, 0x6c, 0x70, 0x68, 0x61, 0x31, 0x2e, 0x44, 0x6f, - 0x77, 0x6e, 0x6c, 0x6f, 0x61, 0x64, 0x49, 0x6e, 0x66, 0x6f, 0x48, 0x00, 0x52, 
0x04, 0x69, 0x6e, - 0x66, 0x6f, 0x12, 0x1f, 0x0a, 0x0a, 0x63, 0x68, 0x75, 0x6e, 0x6b, 0x5f, 0x64, 0x61, 0x74, 0x61, - 0x18, 0x02, 0x20, 0x01, 0x28, 0x0c, 0x48, 0x00, 0x52, 0x09, 0x63, 0x68, 0x75, 0x6e, 0x6b, 0x44, - 0x61, 0x74, 0x61, 0x42, 0x06, 0x0a, 0x04, 0x64, 0x61, 0x74, 0x61, 0x22, 0xa9, 0x01, 0x0a, 0x0c, - 0x44, 0x6f, 0x77, 0x6e, 0x6c, 0x6f, 0x61, 0x64, 0x49, 0x6e, 0x66, 0x6f, 0x12, 0x45, 0x0a, 0x06, - 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x2d, 0x2e, 0x70, + 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, 0x62, 0x75, 0x69, 0x6c, 0x64, 0x49, 0x64, 0x12, 0x1b, 0x0a, + 0x09, 0x75, 0x70, 0x6c, 0x6f, 0x61, 0x64, 0x5f, 0x69, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, + 0x52, 0x08, 0x75, 0x70, 0x6c, 0x6f, 0x61, 0x64, 0x49, 0x64, 0x22, 0x3f, 0x0a, 0x0e, 0x55, 0x70, + 0x6c, 0x6f, 0x61, 0x64, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x19, 0x0a, 0x08, + 0x62, 0x75, 0x69, 0x6c, 0x64, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, + 0x62, 0x75, 0x69, 0x6c, 0x64, 0x49, 0x64, 0x12, 0x12, 0x0a, 0x04, 0x73, 0x69, 0x7a, 0x65, 0x18, + 0x02, 0x20, 0x01, 0x28, 0x04, 0x52, 0x04, 0x73, 0x69, 0x7a, 0x65, 0x22, 0xc7, 0x02, 0x0a, 0x09, + 0x44, 0x65, 0x62, 0x75, 0x67, 0x69, 0x6e, 0x66, 0x6f, 0x12, 0x19, 0x0a, 0x08, 0x62, 0x75, 0x69, + 0x6c, 0x64, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, 0x62, 0x75, 0x69, + 0x6c, 0x64, 0x49, 0x64, 0x12, 0x42, 0x0a, 0x06, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x18, 0x02, + 0x20, 0x01, 0x28, 0x0e, 0x32, 0x2a, 0x2e, 0x70, 0x61, 0x72, 0x63, 0x61, 0x2e, 0x64, 0x65, 0x62, + 0x75, 0x67, 0x69, 0x6e, 0x66, 0x6f, 0x2e, 0x76, 0x31, 0x61, 0x6c, 0x70, 0x68, 0x61, 0x31, 0x2e, + 0x44, 0x65, 0x62, 0x75, 0x67, 0x69, 0x6e, 0x66, 0x6f, 0x2e, 0x53, 0x6f, 0x75, 0x72, 0x63, 0x65, + 0x52, 0x06, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x12, 0x41, 0x0a, 0x06, 0x75, 0x70, 0x6c, 0x6f, + 0x61, 0x64, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x29, 0x2e, 0x70, 0x61, 0x72, 0x63, 0x61, + 0x2e, 0x64, 0x65, 
0x62, 0x75, 0x67, 0x69, 0x6e, 0x66, 0x6f, 0x2e, 0x76, 0x31, 0x61, 0x6c, 0x70, + 0x68, 0x61, 0x31, 0x2e, 0x44, 0x65, 0x62, 0x75, 0x67, 0x69, 0x6e, 0x66, 0x6f, 0x55, 0x70, 0x6c, + 0x6f, 0x61, 0x64, 0x52, 0x06, 0x75, 0x70, 0x6c, 0x6f, 0x61, 0x64, 0x12, 0x44, 0x0a, 0x07, 0x71, + 0x75, 0x61, 0x6c, 0x69, 0x74, 0x79, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x2a, 0x2e, 0x70, 0x61, 0x72, 0x63, 0x61, 0x2e, 0x64, 0x65, 0x62, 0x75, 0x67, 0x69, 0x6e, 0x66, 0x6f, 0x2e, 0x76, - 0x31, 0x61, 0x6c, 0x70, 0x68, 0x61, 0x31, 0x2e, 0x44, 0x6f, 0x77, 0x6e, 0x6c, 0x6f, 0x61, 0x64, - 0x49, 0x6e, 0x66, 0x6f, 0x2e, 0x53, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x52, 0x06, 0x73, 0x6f, 0x75, - 0x72, 0x63, 0x65, 0x22, 0x52, 0x0a, 0x06, 0x53, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x12, 0x1e, 0x0a, - 0x1a, 0x53, 0x4f, 0x55, 0x52, 0x43, 0x45, 0x5f, 0x55, 0x4e, 0x4b, 0x4e, 0x4f, 0x57, 0x4e, 0x5f, - 0x55, 0x4e, 0x53, 0x50, 0x45, 0x43, 0x49, 0x46, 0x49, 0x45, 0x44, 0x10, 0x00, 0x12, 0x11, 0x0a, - 0x0d, 0x53, 0x4f, 0x55, 0x52, 0x43, 0x45, 0x5f, 0x55, 0x50, 0x4c, 0x4f, 0x41, 0x44, 0x10, 0x01, - 0x12, 0x15, 0x0a, 0x11, 0x53, 0x4f, 0x55, 0x52, 0x43, 0x45, 0x5f, 0x44, 0x45, 0x42, 0x55, 0x47, - 0x49, 0x4e, 0x46, 0x4f, 0x44, 0x10, 0x02, 0x32, 0xb9, 0x02, 0x0a, 0x10, 0x44, 0x65, 0x62, 0x75, - 0x67, 0x49, 0x6e, 0x66, 0x6f, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x12, 0x5d, 0x0a, 0x06, - 0x45, 0x78, 0x69, 0x73, 0x74, 0x73, 0x12, 0x27, 0x2e, 0x70, 0x61, 0x72, 0x63, 0x61, 0x2e, 0x64, - 0x65, 0x62, 0x75, 0x67, 0x69, 0x6e, 0x66, 0x6f, 0x2e, 0x76, 0x31, 0x61, 0x6c, 0x70, 0x68, 0x61, - 0x31, 0x2e, 0x45, 0x78, 0x69, 0x73, 0x74, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, - 0x28, 0x2e, 0x70, 0x61, 0x72, 0x63, 0x61, 0x2e, 0x64, 0x65, 0x62, 0x75, 0x67, 0x69, 0x6e, 0x66, - 0x6f, 0x2e, 0x76, 0x31, 0x61, 0x6c, 0x70, 0x68, 0x61, 0x31, 0x2e, 0x45, 0x78, 0x69, 0x73, 0x74, - 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, 0x5f, 0x0a, 0x06, 0x55, - 0x70, 0x6c, 0x6f, 0x61, 0x64, 0x12, 0x27, 0x2e, 0x70, 0x61, 
0x72, 0x63, 0x61, 0x2e, 0x64, 0x65, - 0x62, 0x75, 0x67, 0x69, 0x6e, 0x66, 0x6f, 0x2e, 0x76, 0x31, 0x61, 0x6c, 0x70, 0x68, 0x61, 0x31, - 0x2e, 0x55, 0x70, 0x6c, 0x6f, 0x61, 0x64, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x28, - 0x2e, 0x70, 0x61, 0x72, 0x63, 0x61, 0x2e, 0x64, 0x65, 0x62, 0x75, 0x67, 0x69, 0x6e, 0x66, 0x6f, - 0x2e, 0x76, 0x31, 0x61, 0x6c, 0x70, 0x68, 0x61, 0x31, 0x2e, 0x55, 0x70, 0x6c, 0x6f, 0x61, 0x64, - 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x28, 0x01, 0x12, 0x65, 0x0a, 0x08, - 0x44, 0x6f, 0x77, 0x6e, 0x6c, 0x6f, 0x61, 0x64, 0x12, 0x29, 0x2e, 0x70, 0x61, 0x72, 0x63, 0x61, + 0x31, 0x61, 0x6c, 0x70, 0x68, 0x61, 0x31, 0x2e, 0x44, 0x65, 0x62, 0x75, 0x67, 0x69, 0x6e, 0x66, + 0x6f, 0x51, 0x75, 0x61, 0x6c, 0x69, 0x74, 0x79, 0x52, 0x07, 0x71, 0x75, 0x61, 0x6c, 0x69, 0x74, + 0x79, 0x22, 0x52, 0x0a, 0x06, 0x53, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x12, 0x1e, 0x0a, 0x1a, 0x53, + 0x4f, 0x55, 0x52, 0x43, 0x45, 0x5f, 0x55, 0x4e, 0x4b, 0x4e, 0x4f, 0x57, 0x4e, 0x5f, 0x55, 0x4e, + 0x53, 0x50, 0x45, 0x43, 0x49, 0x46, 0x49, 0x45, 0x44, 0x10, 0x00, 0x12, 0x11, 0x0a, 0x0d, 0x53, + 0x4f, 0x55, 0x52, 0x43, 0x45, 0x5f, 0x55, 0x50, 0x4c, 0x4f, 0x41, 0x44, 0x10, 0x01, 0x12, 0x15, + 0x0a, 0x11, 0x53, 0x4f, 0x55, 0x52, 0x43, 0x45, 0x5f, 0x44, 0x45, 0x42, 0x55, 0x47, 0x49, 0x4e, + 0x46, 0x4f, 0x44, 0x10, 0x02, 0x22, 0xb1, 0x02, 0x0a, 0x0f, 0x44, 0x65, 0x62, 0x75, 0x67, 0x69, + 0x6e, 0x66, 0x6f, 0x55, 0x70, 0x6c, 0x6f, 0x61, 0x64, 0x12, 0x0e, 0x0a, 0x02, 0x69, 0x64, 0x18, + 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x02, 0x69, 0x64, 0x12, 0x45, 0x0a, 0x05, 0x73, 0x74, 0x61, + 0x74, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x2f, 0x2e, 0x70, 0x61, 0x72, 0x63, 0x61, 0x2e, 0x64, 0x65, 0x62, 0x75, 0x67, 0x69, 0x6e, 0x66, 0x6f, 0x2e, 0x76, 0x31, 0x61, 0x6c, 0x70, - 0x68, 0x61, 0x31, 0x2e, 0x44, 0x6f, 0x77, 0x6e, 0x6c, 0x6f, 0x61, 0x64, 0x52, 0x65, 0x71, 0x75, - 0x65, 0x73, 0x74, 0x1a, 0x2a, 0x2e, 0x70, 0x61, 0x72, 0x63, 0x61, 0x2e, 0x64, 0x65, 0x62, 0x75, - 
0x67, 0x69, 0x6e, 0x66, 0x6f, 0x2e, 0x76, 0x31, 0x61, 0x6c, 0x70, 0x68, 0x61, 0x31, 0x2e, 0x44, - 0x6f, 0x77, 0x6e, 0x6c, 0x6f, 0x61, 0x64, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, - 0x00, 0x30, 0x01, 0x42, 0x84, 0x02, 0x0a, 0x1c, 0x63, 0x6f, 0x6d, 0x2e, 0x70, 0x61, 0x72, 0x63, + 0x68, 0x61, 0x31, 0x2e, 0x44, 0x65, 0x62, 0x75, 0x67, 0x69, 0x6e, 0x66, 0x6f, 0x55, 0x70, 0x6c, + 0x6f, 0x61, 0x64, 0x2e, 0x53, 0x74, 0x61, 0x74, 0x65, 0x52, 0x05, 0x73, 0x74, 0x61, 0x74, 0x65, + 0x12, 0x39, 0x0a, 0x0a, 0x73, 0x74, 0x61, 0x72, 0x74, 0x65, 0x64, 0x5f, 0x61, 0x74, 0x18, 0x04, + 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, + 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, + 0x52, 0x09, 0x73, 0x74, 0x61, 0x72, 0x74, 0x65, 0x64, 0x41, 0x74, 0x12, 0x3b, 0x0a, 0x0b, 0x66, + 0x69, 0x6e, 0x69, 0x73, 0x68, 0x65, 0x64, 0x5f, 0x61, 0x74, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0b, + 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, + 0x75, 0x66, 0x2e, 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x52, 0x0a, 0x66, 0x69, + 0x6e, 0x69, 0x73, 0x68, 0x65, 0x64, 0x41, 0x74, 0x22, 0x4f, 0x0a, 0x05, 0x53, 0x74, 0x61, 0x74, + 0x65, 0x12, 0x1d, 0x0a, 0x19, 0x53, 0x54, 0x41, 0x54, 0x45, 0x5f, 0x55, 0x4e, 0x4b, 0x4e, 0x4f, + 0x57, 0x4e, 0x5f, 0x55, 0x4e, 0x53, 0x50, 0x45, 0x43, 0x49, 0x46, 0x49, 0x45, 0x44, 0x10, 0x00, + 0x12, 0x13, 0x0a, 0x0f, 0x53, 0x54, 0x41, 0x54, 0x45, 0x5f, 0x55, 0x50, 0x4c, 0x4f, 0x41, 0x44, + 0x49, 0x4e, 0x47, 0x10, 0x01, 0x12, 0x12, 0x0a, 0x0e, 0x53, 0x54, 0x41, 0x54, 0x45, 0x5f, 0x55, + 0x50, 0x4c, 0x4f, 0x41, 0x44, 0x45, 0x44, 0x10, 0x02, 0x22, 0x36, 0x0a, 0x10, 0x44, 0x65, 0x62, + 0x75, 0x67, 0x69, 0x6e, 0x66, 0x6f, 0x51, 0x75, 0x61, 0x6c, 0x69, 0x74, 0x79, 0x12, 0x22, 0x0a, + 0x0d, 0x6e, 0x6f, 0x74, 0x5f, 0x76, 0x61, 0x6c, 0x69, 0x64, 0x5f, 0x65, 0x6c, 0x66, 0x18, 0x01, + 0x20, 0x01, 0x28, 0x08, 0x52, 0x0b, 
0x6e, 0x6f, 0x74, 0x56, 0x61, 0x6c, 0x69, 0x64, 0x45, 0x6c, + 0x66, 0x32, 0xf8, 0x03, 0x0a, 0x10, 0x44, 0x65, 0x62, 0x75, 0x67, 0x69, 0x6e, 0x66, 0x6f, 0x53, + 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x12, 0x5f, 0x0a, 0x06, 0x55, 0x70, 0x6c, 0x6f, 0x61, 0x64, + 0x12, 0x27, 0x2e, 0x70, 0x61, 0x72, 0x63, 0x61, 0x2e, 0x64, 0x65, 0x62, 0x75, 0x67, 0x69, 0x6e, + 0x66, 0x6f, 0x2e, 0x76, 0x31, 0x61, 0x6c, 0x70, 0x68, 0x61, 0x31, 0x2e, 0x55, 0x70, 0x6c, 0x6f, + 0x61, 0x64, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x28, 0x2e, 0x70, 0x61, 0x72, 0x63, 0x61, 0x2e, 0x64, 0x65, 0x62, 0x75, 0x67, 0x69, 0x6e, 0x66, 0x6f, 0x2e, 0x76, 0x31, 0x61, 0x6c, - 0x70, 0x68, 0x61, 0x31, 0x42, 0x0e, 0x44, 0x65, 0x62, 0x75, 0x67, 0x69, 0x6e, 0x66, 0x6f, 0x50, - 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, 0x5a, 0x52, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, - 0x6f, 0x6d, 0x2f, 0x70, 0x61, 0x72, 0x63, 0x61, 0x2d, 0x64, 0x65, 0x76, 0x2f, 0x70, 0x61, 0x72, - 0x63, 0x61, 0x2f, 0x67, 0x65, 0x6e, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2f, 0x67, 0x6f, 0x2f, - 0x70, 0x61, 0x72, 0x63, 0x61, 0x2f, 0x64, 0x65, 0x62, 0x75, 0x67, 0x69, 0x6e, 0x66, 0x6f, 0x2f, - 0x76, 0x31, 0x61, 0x6c, 0x70, 0x68, 0x61, 0x31, 0x3b, 0x64, 0x65, 0x62, 0x75, 0x67, 0x69, 0x6e, - 0x66, 0x6f, 0x76, 0x31, 0x61, 0x6c, 0x70, 0x68, 0x61, 0x31, 0xa2, 0x02, 0x03, 0x50, 0x44, 0x58, - 0xaa, 0x02, 0x18, 0x50, 0x61, 0x72, 0x63, 0x61, 0x2e, 0x44, 0x65, 0x62, 0x75, 0x67, 0x69, 0x6e, - 0x66, 0x6f, 0x2e, 0x56, 0x31, 0x61, 0x6c, 0x70, 0x68, 0x61, 0x31, 0xca, 0x02, 0x18, 0x50, 0x61, - 0x72, 0x63, 0x61, 0x5c, 0x44, 0x65, 0x62, 0x75, 0x67, 0x69, 0x6e, 0x66, 0x6f, 0x5c, 0x56, 0x31, - 0x61, 0x6c, 0x70, 0x68, 0x61, 0x31, 0xe2, 0x02, 0x24, 0x50, 0x61, 0x72, 0x63, 0x61, 0x5c, 0x44, - 0x65, 0x62, 0x75, 0x67, 0x69, 0x6e, 0x66, 0x6f, 0x5c, 0x56, 0x31, 0x61, 0x6c, 0x70, 0x68, 0x61, - 0x31, 0x5c, 0x47, 0x50, 0x42, 0x4d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0xea, 0x02, 0x1a, - 0x50, 0x61, 0x72, 0x63, 0x61, 0x3a, 0x3a, 0x44, 0x65, 0x62, 0x75, 0x67, 0x69, 
0x6e, 0x66, 0x6f, - 0x3a, 0x3a, 0x56, 0x31, 0x61, 0x6c, 0x70, 0x68, 0x61, 0x31, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, - 0x6f, 0x33, + 0x70, 0x68, 0x61, 0x31, 0x2e, 0x55, 0x70, 0x6c, 0x6f, 0x61, 0x64, 0x52, 0x65, 0x73, 0x70, 0x6f, + 0x6e, 0x73, 0x65, 0x22, 0x00, 0x28, 0x01, 0x12, 0x87, 0x01, 0x0a, 0x14, 0x53, 0x68, 0x6f, 0x75, + 0x6c, 0x64, 0x49, 0x6e, 0x69, 0x74, 0x69, 0x61, 0x74, 0x65, 0x55, 0x70, 0x6c, 0x6f, 0x61, 0x64, + 0x12, 0x35, 0x2e, 0x70, 0x61, 0x72, 0x63, 0x61, 0x2e, 0x64, 0x65, 0x62, 0x75, 0x67, 0x69, 0x6e, + 0x66, 0x6f, 0x2e, 0x76, 0x31, 0x61, 0x6c, 0x70, 0x68, 0x61, 0x31, 0x2e, 0x53, 0x68, 0x6f, 0x75, + 0x6c, 0x64, 0x49, 0x6e, 0x69, 0x74, 0x69, 0x61, 0x74, 0x65, 0x55, 0x70, 0x6c, 0x6f, 0x61, 0x64, + 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x36, 0x2e, 0x70, 0x61, 0x72, 0x63, 0x61, 0x2e, + 0x64, 0x65, 0x62, 0x75, 0x67, 0x69, 0x6e, 0x66, 0x6f, 0x2e, 0x76, 0x31, 0x61, 0x6c, 0x70, 0x68, + 0x61, 0x31, 0x2e, 0x53, 0x68, 0x6f, 0x75, 0x6c, 0x64, 0x49, 0x6e, 0x69, 0x74, 0x69, 0x61, 0x74, + 0x65, 0x55, 0x70, 0x6c, 0x6f, 0x61, 0x64, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, + 0x00, 0x12, 0x75, 0x0a, 0x0e, 0x49, 0x6e, 0x69, 0x74, 0x69, 0x61, 0x74, 0x65, 0x55, 0x70, 0x6c, + 0x6f, 0x61, 0x64, 0x12, 0x2f, 0x2e, 0x70, 0x61, 0x72, 0x63, 0x61, 0x2e, 0x64, 0x65, 0x62, 0x75, + 0x67, 0x69, 0x6e, 0x66, 0x6f, 0x2e, 0x76, 0x31, 0x61, 0x6c, 0x70, 0x68, 0x61, 0x31, 0x2e, 0x49, + 0x6e, 0x69, 0x74, 0x69, 0x61, 0x74, 0x65, 0x55, 0x70, 0x6c, 0x6f, 0x61, 0x64, 0x52, 0x65, 0x71, + 0x75, 0x65, 0x73, 0x74, 0x1a, 0x30, 0x2e, 0x70, 0x61, 0x72, 0x63, 0x61, 0x2e, 0x64, 0x65, 0x62, + 0x75, 0x67, 0x69, 0x6e, 0x66, 0x6f, 0x2e, 0x76, 0x31, 0x61, 0x6c, 0x70, 0x68, 0x61, 0x31, 0x2e, + 0x49, 0x6e, 0x69, 0x74, 0x69, 0x61, 0x74, 0x65, 0x55, 0x70, 0x6c, 0x6f, 0x61, 0x64, 0x52, 0x65, + 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, 0x81, 0x01, 0x0a, 0x12, 0x4d, 0x61, 0x72, + 0x6b, 0x55, 0x70, 0x6c, 0x6f, 0x61, 0x64, 0x46, 0x69, 0x6e, 0x69, 0x73, 0x68, 0x65, 0x64, 0x12, + 0x33, 
0x2e, 0x70, 0x61, 0x72, 0x63, 0x61, 0x2e, 0x64, 0x65, 0x62, 0x75, 0x67, 0x69, 0x6e, 0x66, + 0x6f, 0x2e, 0x76, 0x31, 0x61, 0x6c, 0x70, 0x68, 0x61, 0x31, 0x2e, 0x4d, 0x61, 0x72, 0x6b, 0x55, + 0x70, 0x6c, 0x6f, 0x61, 0x64, 0x46, 0x69, 0x6e, 0x69, 0x73, 0x68, 0x65, 0x64, 0x52, 0x65, 0x71, + 0x75, 0x65, 0x73, 0x74, 0x1a, 0x34, 0x2e, 0x70, 0x61, 0x72, 0x63, 0x61, 0x2e, 0x64, 0x65, 0x62, + 0x75, 0x67, 0x69, 0x6e, 0x66, 0x6f, 0x2e, 0x76, 0x31, 0x61, 0x6c, 0x70, 0x68, 0x61, 0x31, 0x2e, + 0x4d, 0x61, 0x72, 0x6b, 0x55, 0x70, 0x6c, 0x6f, 0x61, 0x64, 0x46, 0x69, 0x6e, 0x69, 0x73, 0x68, + 0x65, 0x64, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x42, 0x84, 0x02, 0x0a, + 0x1c, 0x63, 0x6f, 0x6d, 0x2e, 0x70, 0x61, 0x72, 0x63, 0x61, 0x2e, 0x64, 0x65, 0x62, 0x75, 0x67, + 0x69, 0x6e, 0x66, 0x6f, 0x2e, 0x76, 0x31, 0x61, 0x6c, 0x70, 0x68, 0x61, 0x31, 0x42, 0x0e, 0x44, + 0x65, 0x62, 0x75, 0x67, 0x69, 0x6e, 0x66, 0x6f, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, 0x5a, + 0x52, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x70, 0x61, 0x72, 0x63, + 0x61, 0x2d, 0x64, 0x65, 0x76, 0x2f, 0x70, 0x61, 0x72, 0x63, 0x61, 0x2f, 0x67, 0x65, 0x6e, 0x2f, + 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2f, 0x67, 0x6f, 0x2f, 0x70, 0x61, 0x72, 0x63, 0x61, 0x2f, 0x64, + 0x65, 0x62, 0x75, 0x67, 0x69, 0x6e, 0x66, 0x6f, 0x2f, 0x76, 0x31, 0x61, 0x6c, 0x70, 0x68, 0x61, + 0x31, 0x3b, 0x64, 0x65, 0x62, 0x75, 0x67, 0x69, 0x6e, 0x66, 0x6f, 0x76, 0x31, 0x61, 0x6c, 0x70, + 0x68, 0x61, 0x31, 0xa2, 0x02, 0x03, 0x50, 0x44, 0x58, 0xaa, 0x02, 0x18, 0x50, 0x61, 0x72, 0x63, + 0x61, 0x2e, 0x44, 0x65, 0x62, 0x75, 0x67, 0x69, 0x6e, 0x66, 0x6f, 0x2e, 0x56, 0x31, 0x61, 0x6c, + 0x70, 0x68, 0x61, 0x31, 0xca, 0x02, 0x18, 0x50, 0x61, 0x72, 0x63, 0x61, 0x5c, 0x44, 0x65, 0x62, + 0x75, 0x67, 0x69, 0x6e, 0x66, 0x6f, 0x5c, 0x56, 0x31, 0x61, 0x6c, 0x70, 0x68, 0x61, 0x31, 0xe2, + 0x02, 0x24, 0x50, 0x61, 0x72, 0x63, 0x61, 0x5c, 0x44, 0x65, 0x62, 0x75, 0x67, 0x69, 0x6e, 0x66, + 0x6f, 0x5c, 0x56, 0x31, 0x61, 0x6c, 0x70, 
0x68, 0x61, 0x31, 0x5c, 0x47, 0x50, 0x42, 0x4d, 0x65, + 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0xea, 0x02, 0x1a, 0x50, 0x61, 0x72, 0x63, 0x61, 0x3a, 0x3a, + 0x44, 0x65, 0x62, 0x75, 0x67, 0x69, 0x6e, 0x66, 0x6f, 0x3a, 0x3a, 0x56, 0x31, 0x61, 0x6c, 0x70, + 0x68, 0x61, 0x31, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, } var ( @@ -666,34 +1075,48 @@ func file_parca_debuginfo_v1alpha1_debuginfo_proto_rawDescGZIP() []byte { return file_parca_debuginfo_v1alpha1_debuginfo_proto_rawDescData } -var file_parca_debuginfo_v1alpha1_debuginfo_proto_enumTypes = make([]protoimpl.EnumInfo, 1) -var file_parca_debuginfo_v1alpha1_debuginfo_proto_msgTypes = make([]protoimpl.MessageInfo, 8) +var file_parca_debuginfo_v1alpha1_debuginfo_proto_enumTypes = make([]protoimpl.EnumInfo, 3) +var file_parca_debuginfo_v1alpha1_debuginfo_proto_msgTypes = make([]protoimpl.MessageInfo, 12) var file_parca_debuginfo_v1alpha1_debuginfo_proto_goTypes = []interface{}{ - (DownloadInfo_Source)(0), // 0: parca.debuginfo.v1alpha1.DownloadInfo.Source - (*ExistsRequest)(nil), // 1: parca.debuginfo.v1alpha1.ExistsRequest - (*ExistsResponse)(nil), // 2: parca.debuginfo.v1alpha1.ExistsResponse - (*UploadRequest)(nil), // 3: parca.debuginfo.v1alpha1.UploadRequest - (*UploadInfo)(nil), // 4: parca.debuginfo.v1alpha1.UploadInfo - (*UploadResponse)(nil), // 5: parca.debuginfo.v1alpha1.UploadResponse - (*DownloadRequest)(nil), // 6: parca.debuginfo.v1alpha1.DownloadRequest - (*DownloadResponse)(nil), // 7: parca.debuginfo.v1alpha1.DownloadResponse - (*DownloadInfo)(nil), // 8: parca.debuginfo.v1alpha1.DownloadInfo + (InitiateUploadResponse_UploadStrategy)(0), // 0: parca.debuginfo.v1alpha1.InitiateUploadResponse.UploadStrategy + (Debuginfo_Source)(0), // 1: parca.debuginfo.v1alpha1.Debuginfo.Source + (DebuginfoUpload_State)(0), // 2: parca.debuginfo.v1alpha1.DebuginfoUpload.State + (*ShouldInitiateUploadRequest)(nil), // 3: parca.debuginfo.v1alpha1.ShouldInitiateUploadRequest + (*ShouldInitiateUploadResponse)(nil), // 
4: parca.debuginfo.v1alpha1.ShouldInitiateUploadResponse + (*InitiateUploadRequest)(nil), // 5: parca.debuginfo.v1alpha1.InitiateUploadRequest + (*InitiateUploadResponse)(nil), // 6: parca.debuginfo.v1alpha1.InitiateUploadResponse + (*MarkUploadFinishedRequest)(nil), // 7: parca.debuginfo.v1alpha1.MarkUploadFinishedRequest + (*MarkUploadFinishedResponse)(nil), // 8: parca.debuginfo.v1alpha1.MarkUploadFinishedResponse + (*UploadRequest)(nil), // 9: parca.debuginfo.v1alpha1.UploadRequest + (*UploadInfo)(nil), // 10: parca.debuginfo.v1alpha1.UploadInfo + (*UploadResponse)(nil), // 11: parca.debuginfo.v1alpha1.UploadResponse + (*Debuginfo)(nil), // 12: parca.debuginfo.v1alpha1.Debuginfo + (*DebuginfoUpload)(nil), // 13: parca.debuginfo.v1alpha1.DebuginfoUpload + (*DebuginfoQuality)(nil), // 14: parca.debuginfo.v1alpha1.DebuginfoQuality + (*timestamppb.Timestamp)(nil), // 15: google.protobuf.Timestamp } var file_parca_debuginfo_v1alpha1_debuginfo_proto_depIdxs = []int32{ - 4, // 0: parca.debuginfo.v1alpha1.UploadRequest.info:type_name -> parca.debuginfo.v1alpha1.UploadInfo - 8, // 1: parca.debuginfo.v1alpha1.DownloadResponse.info:type_name -> parca.debuginfo.v1alpha1.DownloadInfo - 0, // 2: parca.debuginfo.v1alpha1.DownloadInfo.source:type_name -> parca.debuginfo.v1alpha1.DownloadInfo.Source - 1, // 3: parca.debuginfo.v1alpha1.DebugInfoService.Exists:input_type -> parca.debuginfo.v1alpha1.ExistsRequest - 3, // 4: parca.debuginfo.v1alpha1.DebugInfoService.Upload:input_type -> parca.debuginfo.v1alpha1.UploadRequest - 6, // 5: parca.debuginfo.v1alpha1.DebugInfoService.Download:input_type -> parca.debuginfo.v1alpha1.DownloadRequest - 2, // 6: parca.debuginfo.v1alpha1.DebugInfoService.Exists:output_type -> parca.debuginfo.v1alpha1.ExistsResponse - 5, // 7: parca.debuginfo.v1alpha1.DebugInfoService.Upload:output_type -> parca.debuginfo.v1alpha1.UploadResponse - 7, // 8: parca.debuginfo.v1alpha1.DebugInfoService.Download:output_type -> parca.debuginfo.v1alpha1.DownloadResponse 
- 6, // [6:9] is the sub-list for method output_type - 3, // [3:6] is the sub-list for method input_type - 3, // [3:3] is the sub-list for extension type_name - 3, // [3:3] is the sub-list for extension extendee - 0, // [0:3] is the sub-list for field type_name + 0, // 0: parca.debuginfo.v1alpha1.InitiateUploadResponse.upload_strategy:type_name -> parca.debuginfo.v1alpha1.InitiateUploadResponse.UploadStrategy + 10, // 1: parca.debuginfo.v1alpha1.UploadRequest.info:type_name -> parca.debuginfo.v1alpha1.UploadInfo + 1, // 2: parca.debuginfo.v1alpha1.Debuginfo.source:type_name -> parca.debuginfo.v1alpha1.Debuginfo.Source + 13, // 3: parca.debuginfo.v1alpha1.Debuginfo.upload:type_name -> parca.debuginfo.v1alpha1.DebuginfoUpload + 14, // 4: parca.debuginfo.v1alpha1.Debuginfo.quality:type_name -> parca.debuginfo.v1alpha1.DebuginfoQuality + 2, // 5: parca.debuginfo.v1alpha1.DebuginfoUpload.state:type_name -> parca.debuginfo.v1alpha1.DebuginfoUpload.State + 15, // 6: parca.debuginfo.v1alpha1.DebuginfoUpload.started_at:type_name -> google.protobuf.Timestamp + 15, // 7: parca.debuginfo.v1alpha1.DebuginfoUpload.finished_at:type_name -> google.protobuf.Timestamp + 9, // 8: parca.debuginfo.v1alpha1.DebuginfoService.Upload:input_type -> parca.debuginfo.v1alpha1.UploadRequest + 3, // 9: parca.debuginfo.v1alpha1.DebuginfoService.ShouldInitiateUpload:input_type -> parca.debuginfo.v1alpha1.ShouldInitiateUploadRequest + 5, // 10: parca.debuginfo.v1alpha1.DebuginfoService.InitiateUpload:input_type -> parca.debuginfo.v1alpha1.InitiateUploadRequest + 7, // 11: parca.debuginfo.v1alpha1.DebuginfoService.MarkUploadFinished:input_type -> parca.debuginfo.v1alpha1.MarkUploadFinishedRequest + 11, // 12: parca.debuginfo.v1alpha1.DebuginfoService.Upload:output_type -> parca.debuginfo.v1alpha1.UploadResponse + 4, // 13: parca.debuginfo.v1alpha1.DebuginfoService.ShouldInitiateUpload:output_type -> parca.debuginfo.v1alpha1.ShouldInitiateUploadResponse + 6, // 14: 
parca.debuginfo.v1alpha1.DebuginfoService.InitiateUpload:output_type -> parca.debuginfo.v1alpha1.InitiateUploadResponse + 8, // 15: parca.debuginfo.v1alpha1.DebuginfoService.MarkUploadFinished:output_type -> parca.debuginfo.v1alpha1.MarkUploadFinishedResponse + 12, // [12:16] is the sub-list for method output_type + 8, // [8:12] is the sub-list for method input_type + 8, // [8:8] is the sub-list for extension type_name + 8, // [8:8] is the sub-list for extension extendee + 0, // [0:8] is the sub-list for field type_name } func init() { file_parca_debuginfo_v1alpha1_debuginfo_proto_init() } @@ -703,7 +1126,7 @@ func file_parca_debuginfo_v1alpha1_debuginfo_proto_init() { } if !protoimpl.UnsafeEnabled { file_parca_debuginfo_v1alpha1_debuginfo_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*ExistsRequest); i { + switch v := v.(*ShouldInitiateUploadRequest); i { case 0: return &v.state case 1: @@ -715,7 +1138,7 @@ func file_parca_debuginfo_v1alpha1_debuginfo_proto_init() { } } file_parca_debuginfo_v1alpha1_debuginfo_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*ExistsResponse); i { + switch v := v.(*ShouldInitiateUploadResponse); i { case 0: return &v.state case 1: @@ -727,7 +1150,7 @@ func file_parca_debuginfo_v1alpha1_debuginfo_proto_init() { } } file_parca_debuginfo_v1alpha1_debuginfo_proto_msgTypes[2].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*UploadRequest); i { + switch v := v.(*InitiateUploadRequest); i { case 0: return &v.state case 1: @@ -739,7 +1162,7 @@ func file_parca_debuginfo_v1alpha1_debuginfo_proto_init() { } } file_parca_debuginfo_v1alpha1_debuginfo_proto_msgTypes[3].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*UploadInfo); i { + switch v := v.(*InitiateUploadResponse); i { case 0: return &v.state case 1: @@ -751,7 +1174,7 @@ func file_parca_debuginfo_v1alpha1_debuginfo_proto_init() { } } 
file_parca_debuginfo_v1alpha1_debuginfo_proto_msgTypes[4].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*UploadResponse); i { + switch v := v.(*MarkUploadFinishedRequest); i { case 0: return &v.state case 1: @@ -763,7 +1186,7 @@ func file_parca_debuginfo_v1alpha1_debuginfo_proto_init() { } } file_parca_debuginfo_v1alpha1_debuginfo_proto_msgTypes[5].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*DownloadRequest); i { + switch v := v.(*MarkUploadFinishedResponse); i { case 0: return &v.state case 1: @@ -775,7 +1198,7 @@ func file_parca_debuginfo_v1alpha1_debuginfo_proto_init() { } } file_parca_debuginfo_v1alpha1_debuginfo_proto_msgTypes[6].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*DownloadResponse); i { + switch v := v.(*UploadRequest); i { case 0: return &v.state case 1: @@ -787,7 +1210,55 @@ func file_parca_debuginfo_v1alpha1_debuginfo_proto_init() { } } file_parca_debuginfo_v1alpha1_debuginfo_proto_msgTypes[7].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*DownloadInfo); i { + switch v := v.(*UploadInfo); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_parca_debuginfo_v1alpha1_debuginfo_proto_msgTypes[8].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*UploadResponse); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_parca_debuginfo_v1alpha1_debuginfo_proto_msgTypes[9].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*Debuginfo); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_parca_debuginfo_v1alpha1_debuginfo_proto_msgTypes[10].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*DebuginfoUpload); i { + case 0: + return &v.state + case 1: + 
return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_parca_debuginfo_v1alpha1_debuginfo_proto_msgTypes[11].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*DebuginfoQuality); i { case 0: return &v.state case 1: @@ -799,21 +1270,17 @@ func file_parca_debuginfo_v1alpha1_debuginfo_proto_init() { } } } - file_parca_debuginfo_v1alpha1_debuginfo_proto_msgTypes[2].OneofWrappers = []interface{}{ + file_parca_debuginfo_v1alpha1_debuginfo_proto_msgTypes[6].OneofWrappers = []interface{}{ (*UploadRequest_Info)(nil), (*UploadRequest_ChunkData)(nil), } - file_parca_debuginfo_v1alpha1_debuginfo_proto_msgTypes[6].OneofWrappers = []interface{}{ - (*DownloadResponse_Info)(nil), - (*DownloadResponse_ChunkData)(nil), - } type x struct{} out := protoimpl.TypeBuilder{ File: protoimpl.DescBuilder{ GoPackagePath: reflect.TypeOf(x{}).PkgPath(), RawDescriptor: file_parca_debuginfo_v1alpha1_debuginfo_proto_rawDesc, - NumEnums: 1, - NumMessages: 8, + NumEnums: 3, + NumMessages: 12, NumExtensions: 0, NumServices: 1, }, diff --git a/gen/proto/go/parca/debuginfo/v1alpha1/debuginfo.pb.gw.go b/gen/proto/go/parca/debuginfo/v1alpha1/debuginfo.pb.gw.go index b7849789f2a..b18ec48c804 100644 --- a/gen/proto/go/parca/debuginfo/v1alpha1/debuginfo.pb.gw.go +++ b/gen/proto/go/parca/debuginfo/v1alpha1/debuginfo.pb.gw.go @@ -31,41 +31,7 @@ var _ = runtime.String var _ = utilities.NewDoubleArray var _ = metadata.Join -func request_DebugInfoService_Exists_0(ctx context.Context, marshaler runtime.Marshaler, client DebugInfoServiceClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { - var protoReq ExistsRequest - var metadata runtime.ServerMetadata - - newReader, berr := utilities.IOReaderFactory(req.Body) - if berr != nil { - return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", berr) - } - if err := marshaler.NewDecoder(newReader()).Decode(&protoReq); err != nil && err != io.EOF { - 
return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) - } - - msg, err := client.Exists(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD)) - return msg, metadata, err - -} - -func local_request_DebugInfoService_Exists_0(ctx context.Context, marshaler runtime.Marshaler, server DebugInfoServiceServer, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { - var protoReq ExistsRequest - var metadata runtime.ServerMetadata - - newReader, berr := utilities.IOReaderFactory(req.Body) - if berr != nil { - return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", berr) - } - if err := marshaler.NewDecoder(newReader()).Decode(&protoReq); err != nil && err != io.EOF { - return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) - } - - msg, err := server.Exists(ctx, &protoReq) - return msg, metadata, err - -} - -func request_DebugInfoService_Upload_0(ctx context.Context, marshaler runtime.Marshaler, client DebugInfoServiceClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { +func request_DebuginfoService_Upload_0(ctx context.Context, marshaler runtime.Marshaler, client DebuginfoServiceClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { var metadata runtime.ServerMetadata stream, err := client.Upload(ctx) if err != nil { @@ -109,8 +75,8 @@ func request_DebugInfoService_Upload_0(ctx context.Context, marshaler runtime.Ma } -func request_DebugInfoService_Download_0(ctx context.Context, marshaler runtime.Marshaler, client DebugInfoServiceClient, req *http.Request, pathParams map[string]string) (DebugInfoService_DownloadClient, runtime.ServerMetadata, error) { - var protoReq DownloadRequest +func request_DebuginfoService_ShouldInitiateUpload_0(ctx context.Context, marshaler runtime.Marshaler, client DebuginfoServiceClient, req *http.Request, pathParams 
map[string]string) (proto.Message, runtime.ServerMetadata, error) { + var protoReq ShouldInitiateUploadRequest var metadata runtime.ServerMetadata newReader, berr := utilities.IOReaderFactory(req.Body) @@ -121,26 +87,110 @@ func request_DebugInfoService_Download_0(ctx context.Context, marshaler runtime. return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) } - stream, err := client.Download(ctx, &protoReq) - if err != nil { - return nil, metadata, err + msg, err := client.ShouldInitiateUpload(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD)) + return msg, metadata, err + +} + +func local_request_DebuginfoService_ShouldInitiateUpload_0(ctx context.Context, marshaler runtime.Marshaler, server DebuginfoServiceServer, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { + var protoReq ShouldInitiateUploadRequest + var metadata runtime.ServerMetadata + + newReader, berr := utilities.IOReaderFactory(req.Body) + if berr != nil { + return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", berr) } - header, err := stream.Header() - if err != nil { - return nil, metadata, err + if err := marshaler.NewDecoder(newReader()).Decode(&protoReq); err != nil && err != io.EOF { + return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) } - metadata.HeaderMD = header - return stream, metadata, nil + + msg, err := server.ShouldInitiateUpload(ctx, &protoReq) + return msg, metadata, err + +} + +func request_DebuginfoService_InitiateUpload_0(ctx context.Context, marshaler runtime.Marshaler, client DebuginfoServiceClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { + var protoReq InitiateUploadRequest + var metadata runtime.ServerMetadata + + newReader, berr := utilities.IOReaderFactory(req.Body) + if berr != nil { + return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", berr) + } + if err := 
marshaler.NewDecoder(newReader()).Decode(&protoReq); err != nil && err != io.EOF { + return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) + } + + msg, err := client.InitiateUpload(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD)) + return msg, metadata, err + +} + +func local_request_DebuginfoService_InitiateUpload_0(ctx context.Context, marshaler runtime.Marshaler, server DebuginfoServiceServer, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { + var protoReq InitiateUploadRequest + var metadata runtime.ServerMetadata + + newReader, berr := utilities.IOReaderFactory(req.Body) + if berr != nil { + return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", berr) + } + if err := marshaler.NewDecoder(newReader()).Decode(&protoReq); err != nil && err != io.EOF { + return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) + } + + msg, err := server.InitiateUpload(ctx, &protoReq) + return msg, metadata, err } -// RegisterDebugInfoServiceHandlerServer registers the http handlers for service DebugInfoService to "mux". -// UnaryRPC :call DebugInfoServiceServer directly. 
+func request_DebuginfoService_MarkUploadFinished_0(ctx context.Context, marshaler runtime.Marshaler, client DebuginfoServiceClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { + var protoReq MarkUploadFinishedRequest + var metadata runtime.ServerMetadata + + newReader, berr := utilities.IOReaderFactory(req.Body) + if berr != nil { + return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", berr) + } + if err := marshaler.NewDecoder(newReader()).Decode(&protoReq); err != nil && err != io.EOF { + return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) + } + + msg, err := client.MarkUploadFinished(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD)) + return msg, metadata, err + +} + +func local_request_DebuginfoService_MarkUploadFinished_0(ctx context.Context, marshaler runtime.Marshaler, server DebuginfoServiceServer, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { + var protoReq MarkUploadFinishedRequest + var metadata runtime.ServerMetadata + + newReader, berr := utilities.IOReaderFactory(req.Body) + if berr != nil { + return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", berr) + } + if err := marshaler.NewDecoder(newReader()).Decode(&protoReq); err != nil && err != io.EOF { + return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) + } + + msg, err := server.MarkUploadFinished(ctx, &protoReq) + return msg, metadata, err + +} + +// RegisterDebuginfoServiceHandlerServer registers the http handlers for service DebuginfoService to "mux". +// UnaryRPC :call DebuginfoServiceServer directly. // StreamingRPC :currently unsupported pending https://github.com/grpc/grpc-go/issues/906. -// Note that using this registration option will cause many gRPC library features to stop working. Consider using RegisterDebugInfoServiceHandlerFromEndpoint instead. 
-func RegisterDebugInfoServiceHandlerServer(ctx context.Context, mux *runtime.ServeMux, server DebugInfoServiceServer) error { +// Note that using this registration option will cause many gRPC library features to stop working. Consider using RegisterDebuginfoServiceHandlerFromEndpoint instead. +func RegisterDebuginfoServiceHandlerServer(ctx context.Context, mux *runtime.ServeMux, server DebuginfoServiceServer) error { - mux.Handle("POST", pattern_DebugInfoService_Exists_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { + mux.Handle("POST", pattern_DebuginfoService_Upload_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { + err := status.Error(codes.Unimplemented, "streaming calls are not yet supported in the in-process transport") + _, outboundMarshaler := runtime.MarshalerForRequest(mux, req) + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + }) + + mux.Handle("POST", pattern_DebuginfoService_ShouldInitiateUpload_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { ctx, cancel := context.WithCancel(req.Context()) defer cancel() var stream runtime.ServerTransportStream @@ -148,12 +198,12 @@ func RegisterDebugInfoServiceHandlerServer(ctx context.Context, mux *runtime.Ser inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) var err error var annotatedContext context.Context - annotatedContext, err = runtime.AnnotateIncomingContext(ctx, mux, req, "/parca.debuginfo.v1alpha1.DebugInfoService/Exists", runtime.WithHTTPPathPattern("/parca.debuginfo.v1alpha1.DebugInfoService/Exists")) + annotatedContext, err = runtime.AnnotateIncomingContext(ctx, mux, req, "/parca.debuginfo.v1alpha1.DebuginfoService/ShouldInitiateUpload", runtime.WithHTTPPathPattern("/parca.debuginfo.v1alpha1.DebuginfoService/ShouldInitiateUpload")) if err != nil { runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) return } - resp, md, err := 
local_request_DebugInfoService_Exists_0(annotatedContext, inboundMarshaler, server, req, pathParams) + resp, md, err := local_request_DebuginfoService_ShouldInitiateUpload_0(annotatedContext, inboundMarshaler, server, req, pathParams) md.HeaderMD, md.TrailerMD = metadata.Join(md.HeaderMD, stream.Header()), metadata.Join(md.TrailerMD, stream.Trailer()) annotatedContext = runtime.NewServerMetadataContext(annotatedContext, md) if err != nil { @@ -161,30 +211,66 @@ func RegisterDebugInfoServiceHandlerServer(ctx context.Context, mux *runtime.Ser return } - forward_DebugInfoService_Exists_0(annotatedContext, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) + forward_DebuginfoService_ShouldInitiateUpload_0(annotatedContext, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) }) - mux.Handle("POST", pattern_DebugInfoService_Upload_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { - err := status.Error(codes.Unimplemented, "streaming calls are not yet supported in the in-process transport") - _, outboundMarshaler := runtime.MarshalerForRequest(mux, req) - runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) - return + mux.Handle("POST", pattern_DebuginfoService_InitiateUpload_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { + ctx, cancel := context.WithCancel(req.Context()) + defer cancel() + var stream runtime.ServerTransportStream + ctx = grpc.NewContextWithServerTransportStream(ctx, &stream) + inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) + var err error + var annotatedContext context.Context + annotatedContext, err = runtime.AnnotateIncomingContext(ctx, mux, req, "/parca.debuginfo.v1alpha1.DebuginfoService/InitiateUpload", runtime.WithHTTPPathPattern("/parca.debuginfo.v1alpha1.DebuginfoService/InitiateUpload")) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + resp, md, err := 
local_request_DebuginfoService_InitiateUpload_0(annotatedContext, inboundMarshaler, server, req, pathParams) + md.HeaderMD, md.TrailerMD = metadata.Join(md.HeaderMD, stream.Header()), metadata.Join(md.TrailerMD, stream.Trailer()) + annotatedContext = runtime.NewServerMetadataContext(annotatedContext, md) + if err != nil { + runtime.HTTPError(annotatedContext, mux, outboundMarshaler, w, req, err) + return + } + + forward_DebuginfoService_InitiateUpload_0(annotatedContext, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) + }) - mux.Handle("POST", pattern_DebugInfoService_Download_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { - err := status.Error(codes.Unimplemented, "streaming calls are not yet supported in the in-process transport") - _, outboundMarshaler := runtime.MarshalerForRequest(mux, req) - runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) - return + mux.Handle("POST", pattern_DebuginfoService_MarkUploadFinished_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { + ctx, cancel := context.WithCancel(req.Context()) + defer cancel() + var stream runtime.ServerTransportStream + ctx = grpc.NewContextWithServerTransportStream(ctx, &stream) + inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) + var err error + var annotatedContext context.Context + annotatedContext, err = runtime.AnnotateIncomingContext(ctx, mux, req, "/parca.debuginfo.v1alpha1.DebuginfoService/MarkUploadFinished", runtime.WithHTTPPathPattern("/parca.debuginfo.v1alpha1.DebuginfoService/MarkUploadFinished")) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + resp, md, err := local_request_DebuginfoService_MarkUploadFinished_0(annotatedContext, inboundMarshaler, server, req, pathParams) + md.HeaderMD, md.TrailerMD = metadata.Join(md.HeaderMD, stream.Header()), metadata.Join(md.TrailerMD, stream.Trailer()) + annotatedContext = 
runtime.NewServerMetadataContext(annotatedContext, md) + if err != nil { + runtime.HTTPError(annotatedContext, mux, outboundMarshaler, w, req, err) + return + } + + forward_DebuginfoService_MarkUploadFinished_0(annotatedContext, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) + }) return nil } -// RegisterDebugInfoServiceHandlerFromEndpoint is same as RegisterDebugInfoServiceHandler but +// RegisterDebuginfoServiceHandlerFromEndpoint is same as RegisterDebuginfoServiceHandler but // automatically dials to "endpoint" and closes the connection when "ctx" gets done. -func RegisterDebugInfoServiceHandlerFromEndpoint(ctx context.Context, mux *runtime.ServeMux, endpoint string, opts []grpc.DialOption) (err error) { +func RegisterDebuginfoServiceHandlerFromEndpoint(ctx context.Context, mux *runtime.ServeMux, endpoint string, opts []grpc.DialOption) (err error) { conn, err := grpc.Dial(endpoint, opts...) if err != nil { return err @@ -204,85 +290,107 @@ func RegisterDebugInfoServiceHandlerFromEndpoint(ctx context.Context, mux *runti }() }() - return RegisterDebugInfoServiceHandler(ctx, mux, conn) + return RegisterDebuginfoServiceHandler(ctx, mux, conn) } -// RegisterDebugInfoServiceHandler registers the http handlers for service DebugInfoService to "mux". +// RegisterDebuginfoServiceHandler registers the http handlers for service DebuginfoService to "mux". // The handlers forward requests to the grpc endpoint over "conn". -func RegisterDebugInfoServiceHandler(ctx context.Context, mux *runtime.ServeMux, conn *grpc.ClientConn) error { - return RegisterDebugInfoServiceHandlerClient(ctx, mux, NewDebugInfoServiceClient(conn)) +func RegisterDebuginfoServiceHandler(ctx context.Context, mux *runtime.ServeMux, conn *grpc.ClientConn) error { + return RegisterDebuginfoServiceHandlerClient(ctx, mux, NewDebuginfoServiceClient(conn)) } -// RegisterDebugInfoServiceHandlerClient registers the http handlers for service DebugInfoService -// to "mux". 
The handlers forward requests to the grpc endpoint over the given implementation of "DebugInfoServiceClient". -// Note: the gRPC framework executes interceptors within the gRPC handler. If the passed in "DebugInfoServiceClient" +// RegisterDebuginfoServiceHandlerClient registers the http handlers for service DebuginfoService +// to "mux". The handlers forward requests to the grpc endpoint over the given implementation of "DebuginfoServiceClient". +// Note: the gRPC framework executes interceptors within the gRPC handler. If the passed in "DebuginfoServiceClient" // doesn't go through the normal gRPC flow (creating a gRPC client etc.) then it will be up to the passed in -// "DebugInfoServiceClient" to call the correct interceptors. -func RegisterDebugInfoServiceHandlerClient(ctx context.Context, mux *runtime.ServeMux, client DebugInfoServiceClient) error { +// "DebuginfoServiceClient" to call the correct interceptors. +func RegisterDebuginfoServiceHandlerClient(ctx context.Context, mux *runtime.ServeMux, client DebuginfoServiceClient) error { - mux.Handle("POST", pattern_DebugInfoService_Exists_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { + mux.Handle("POST", pattern_DebuginfoService_Upload_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { ctx, cancel := context.WithCancel(req.Context()) defer cancel() inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) var err error var annotatedContext context.Context - annotatedContext, err = runtime.AnnotateContext(ctx, mux, req, "/parca.debuginfo.v1alpha1.DebugInfoService/Exists", runtime.WithHTTPPathPattern("/parca.debuginfo.v1alpha1.DebugInfoService/Exists")) + annotatedContext, err = runtime.AnnotateContext(ctx, mux, req, "/parca.debuginfo.v1alpha1.DebuginfoService/Upload", runtime.WithHTTPPathPattern("/parca.debuginfo.v1alpha1.DebuginfoService/Upload")) if err != nil { runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) 
return } - resp, md, err := request_DebugInfoService_Exists_0(annotatedContext, inboundMarshaler, client, req, pathParams) + resp, md, err := request_DebuginfoService_Upload_0(annotatedContext, inboundMarshaler, client, req, pathParams) annotatedContext = runtime.NewServerMetadataContext(annotatedContext, md) if err != nil { runtime.HTTPError(annotatedContext, mux, outboundMarshaler, w, req, err) return } - forward_DebugInfoService_Exists_0(annotatedContext, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) + forward_DebuginfoService_Upload_0(annotatedContext, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) }) - mux.Handle("POST", pattern_DebugInfoService_Upload_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { + mux.Handle("POST", pattern_DebuginfoService_ShouldInitiateUpload_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { ctx, cancel := context.WithCancel(req.Context()) defer cancel() inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) var err error var annotatedContext context.Context - annotatedContext, err = runtime.AnnotateContext(ctx, mux, req, "/parca.debuginfo.v1alpha1.DebugInfoService/Upload", runtime.WithHTTPPathPattern("/parca.debuginfo.v1alpha1.DebugInfoService/Upload")) + annotatedContext, err = runtime.AnnotateContext(ctx, mux, req, "/parca.debuginfo.v1alpha1.DebuginfoService/ShouldInitiateUpload", runtime.WithHTTPPathPattern("/parca.debuginfo.v1alpha1.DebuginfoService/ShouldInitiateUpload")) if err != nil { runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) return } - resp, md, err := request_DebugInfoService_Upload_0(annotatedContext, inboundMarshaler, client, req, pathParams) + resp, md, err := request_DebuginfoService_ShouldInitiateUpload_0(annotatedContext, inboundMarshaler, client, req, pathParams) annotatedContext = runtime.NewServerMetadataContext(annotatedContext, md) if err != nil { 
runtime.HTTPError(annotatedContext, mux, outboundMarshaler, w, req, err) return } - forward_DebugInfoService_Upload_0(annotatedContext, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) + forward_DebuginfoService_ShouldInitiateUpload_0(annotatedContext, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) }) - mux.Handle("POST", pattern_DebugInfoService_Download_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { + mux.Handle("POST", pattern_DebuginfoService_InitiateUpload_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { ctx, cancel := context.WithCancel(req.Context()) defer cancel() inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) var err error var annotatedContext context.Context - annotatedContext, err = runtime.AnnotateContext(ctx, mux, req, "/parca.debuginfo.v1alpha1.DebugInfoService/Download", runtime.WithHTTPPathPattern("/parca.debuginfo.v1alpha1.DebugInfoService/Download")) + annotatedContext, err = runtime.AnnotateContext(ctx, mux, req, "/parca.debuginfo.v1alpha1.DebuginfoService/InitiateUpload", runtime.WithHTTPPathPattern("/parca.debuginfo.v1alpha1.DebuginfoService/InitiateUpload")) if err != nil { runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) return } - resp, md, err := request_DebugInfoService_Download_0(annotatedContext, inboundMarshaler, client, req, pathParams) + resp, md, err := request_DebuginfoService_InitiateUpload_0(annotatedContext, inboundMarshaler, client, req, pathParams) annotatedContext = runtime.NewServerMetadataContext(annotatedContext, md) if err != nil { runtime.HTTPError(annotatedContext, mux, outboundMarshaler, w, req, err) return } - forward_DebugInfoService_Download_0(annotatedContext, mux, outboundMarshaler, w, req, func() (proto.Message, error) { return resp.Recv() }, mux.GetForwardResponseOptions()...) 
+ forward_DebuginfoService_InitiateUpload_0(annotatedContext, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) + + }) + + mux.Handle("POST", pattern_DebuginfoService_MarkUploadFinished_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { + ctx, cancel := context.WithCancel(req.Context()) + defer cancel() + inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) + var err error + var annotatedContext context.Context + annotatedContext, err = runtime.AnnotateContext(ctx, mux, req, "/parca.debuginfo.v1alpha1.DebuginfoService/MarkUploadFinished", runtime.WithHTTPPathPattern("/parca.debuginfo.v1alpha1.DebuginfoService/MarkUploadFinished")) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + resp, md, err := request_DebuginfoService_MarkUploadFinished_0(annotatedContext, inboundMarshaler, client, req, pathParams) + annotatedContext = runtime.NewServerMetadataContext(annotatedContext, md) + if err != nil { + runtime.HTTPError(annotatedContext, mux, outboundMarshaler, w, req, err) + return + } + + forward_DebuginfoService_MarkUploadFinished_0(annotatedContext, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) 
}) @@ -290,17 +398,21 @@ func RegisterDebugInfoServiceHandlerClient(ctx context.Context, mux *runtime.Ser } var ( - pattern_DebugInfoService_Exists_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1}, []string{"parca.debuginfo.v1alpha1.DebugInfoService", "Exists"}, "")) + pattern_DebuginfoService_Upload_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1}, []string{"parca.debuginfo.v1alpha1.DebuginfoService", "Upload"}, "")) - pattern_DebugInfoService_Upload_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1}, []string{"parca.debuginfo.v1alpha1.DebugInfoService", "Upload"}, "")) + pattern_DebuginfoService_ShouldInitiateUpload_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1}, []string{"parca.debuginfo.v1alpha1.DebuginfoService", "ShouldInitiateUpload"}, "")) - pattern_DebugInfoService_Download_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1}, []string{"parca.debuginfo.v1alpha1.DebugInfoService", "Download"}, "")) + pattern_DebuginfoService_InitiateUpload_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1}, []string{"parca.debuginfo.v1alpha1.DebuginfoService", "InitiateUpload"}, "")) + + pattern_DebuginfoService_MarkUploadFinished_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1}, []string{"parca.debuginfo.v1alpha1.DebuginfoService", "MarkUploadFinished"}, "")) ) var ( - forward_DebugInfoService_Exists_0 = runtime.ForwardResponseMessage + forward_DebuginfoService_Upload_0 = runtime.ForwardResponseMessage + + forward_DebuginfoService_ShouldInitiateUpload_0 = runtime.ForwardResponseMessage - forward_DebugInfoService_Upload_0 = runtime.ForwardResponseMessage + forward_DebuginfoService_InitiateUpload_0 = runtime.ForwardResponseMessage - forward_DebugInfoService_Download_0 = runtime.ForwardResponseStream + forward_DebuginfoService_MarkUploadFinished_0 = runtime.ForwardResponseMessage ) diff --git a/gen/proto/go/parca/debuginfo/v1alpha1/debuginfo_vtproto.pb.go 
b/gen/proto/go/parca/debuginfo/v1alpha1/debuginfo_vtproto.pb.go index 874562d462f..be9c026545b 100644 --- a/gen/proto/go/parca/debuginfo/v1alpha1/debuginfo_vtproto.pb.go +++ b/gen/proto/go/parca/debuginfo/v1alpha1/debuginfo_vtproto.pb.go @@ -10,7 +10,9 @@ import ( grpc "google.golang.org/grpc" codes "google.golang.org/grpc/codes" status "google.golang.org/grpc/status" + proto "google.golang.org/protobuf/proto" protoimpl "google.golang.org/protobuf/runtime/protoimpl" + timestamppb "google.golang.org/protobuf/types/known/timestamppb" io "io" bits "math/bits" ) @@ -27,59 +29,52 @@ const ( // Requires gRPC-Go v1.32.0 or later. const _ = grpc.SupportPackageIsVersion7 -// DebugInfoServiceClient is the client API for DebugInfoService service. +// DebuginfoServiceClient is the client API for DebuginfoService service. // // For semantics around ctx use and closing/ending streaming RPCs, please refer to https://pkg.go.dev/google.golang.org/grpc/?tab=doc#ClientConn.NewStream. -type DebugInfoServiceClient interface { - // Exists returns true if the given build_id has debug info uploaded for it. - Exists(ctx context.Context, in *ExistsRequest, opts ...grpc.CallOption) (*ExistsResponse, error) +type DebuginfoServiceClient interface { // Upload ingests debug info for a given build_id - Upload(ctx context.Context, opts ...grpc.CallOption) (DebugInfoService_UploadClient, error) - // Download returns the debug info for a given build_id. - Download(ctx context.Context, in *DownloadRequest, opts ...grpc.CallOption) (DebugInfoService_DownloadClient, error) + Upload(ctx context.Context, opts ...grpc.CallOption) (DebuginfoService_UploadClient, error) + // ShouldInitiateUpload returns whether an upload for a given build_id should be initiated or not. + ShouldInitiateUpload(ctx context.Context, in *ShouldInitiateUploadRequest, opts ...grpc.CallOption) (*ShouldInitiateUploadResponse, error) + // InitiateUpload returns a strategy and information to upload debug info for a given build_id. 
+ InitiateUpload(ctx context.Context, in *InitiateUploadRequest, opts ...grpc.CallOption) (*InitiateUploadResponse, error) + // MarkUploadFinished marks the upload as finished for a given build_id. + MarkUploadFinished(ctx context.Context, in *MarkUploadFinishedRequest, opts ...grpc.CallOption) (*MarkUploadFinishedResponse, error) } -type debugInfoServiceClient struct { +type debuginfoServiceClient struct { cc grpc.ClientConnInterface } -func NewDebugInfoServiceClient(cc grpc.ClientConnInterface) DebugInfoServiceClient { - return &debugInfoServiceClient{cc} +func NewDebuginfoServiceClient(cc grpc.ClientConnInterface) DebuginfoServiceClient { + return &debuginfoServiceClient{cc} } -func (c *debugInfoServiceClient) Exists(ctx context.Context, in *ExistsRequest, opts ...grpc.CallOption) (*ExistsResponse, error) { - out := new(ExistsResponse) - err := c.cc.Invoke(ctx, "/parca.debuginfo.v1alpha1.DebugInfoService/Exists", in, out, opts...) +func (c *debuginfoServiceClient) Upload(ctx context.Context, opts ...grpc.CallOption) (DebuginfoService_UploadClient, error) { + stream, err := c.cc.NewStream(ctx, &DebuginfoService_ServiceDesc.Streams[0], "/parca.debuginfo.v1alpha1.DebuginfoService/Upload", opts...) if err != nil { return nil, err } - return out, nil -} - -func (c *debugInfoServiceClient) Upload(ctx context.Context, opts ...grpc.CallOption) (DebugInfoService_UploadClient, error) { - stream, err := c.cc.NewStream(ctx, &DebugInfoService_ServiceDesc.Streams[0], "/parca.debuginfo.v1alpha1.DebugInfoService/Upload", opts...) 
- if err != nil { - return nil, err - } - x := &debugInfoServiceUploadClient{stream} + x := &debuginfoServiceUploadClient{stream} return x, nil } -type DebugInfoService_UploadClient interface { +type DebuginfoService_UploadClient interface { Send(*UploadRequest) error CloseAndRecv() (*UploadResponse, error) grpc.ClientStream } -type debugInfoServiceUploadClient struct { +type debuginfoServiceUploadClient struct { grpc.ClientStream } -func (x *debugInfoServiceUploadClient) Send(m *UploadRequest) error { +func (x *debuginfoServiceUploadClient) Send(m *UploadRequest) error { return x.ClientStream.SendMsg(m) } -func (x *debugInfoServiceUploadClient) CloseAndRecv() (*UploadResponse, error) { +func (x *debuginfoServiceUploadClient) CloseAndRecv() (*UploadResponse, error) { if err := x.ClientStream.CloseSend(); err != nil { return nil, err } @@ -90,114 +85,96 @@ func (x *debugInfoServiceUploadClient) CloseAndRecv() (*UploadResponse, error) { return m, nil } -func (c *debugInfoServiceClient) Download(ctx context.Context, in *DownloadRequest, opts ...grpc.CallOption) (DebugInfoService_DownloadClient, error) { - stream, err := c.cc.NewStream(ctx, &DebugInfoService_ServiceDesc.Streams[1], "/parca.debuginfo.v1alpha1.DebugInfoService/Download", opts...) +func (c *debuginfoServiceClient) ShouldInitiateUpload(ctx context.Context, in *ShouldInitiateUploadRequest, opts ...grpc.CallOption) (*ShouldInitiateUploadResponse, error) { + out := new(ShouldInitiateUploadResponse) + err := c.cc.Invoke(ctx, "/parca.debuginfo.v1alpha1.DebuginfoService/ShouldInitiateUpload", in, out, opts...) 
if err != nil { return nil, err } - x := &debugInfoServiceDownloadClient{stream} - if err := x.ClientStream.SendMsg(in); err != nil { - return nil, err - } - if err := x.ClientStream.CloseSend(); err != nil { - return nil, err - } - return x, nil -} - -type DebugInfoService_DownloadClient interface { - Recv() (*DownloadResponse, error) - grpc.ClientStream + return out, nil } -type debugInfoServiceDownloadClient struct { - grpc.ClientStream +func (c *debuginfoServiceClient) InitiateUpload(ctx context.Context, in *InitiateUploadRequest, opts ...grpc.CallOption) (*InitiateUploadResponse, error) { + out := new(InitiateUploadResponse) + err := c.cc.Invoke(ctx, "/parca.debuginfo.v1alpha1.DebuginfoService/InitiateUpload", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil } -func (x *debugInfoServiceDownloadClient) Recv() (*DownloadResponse, error) { - m := new(DownloadResponse) - if err := x.ClientStream.RecvMsg(m); err != nil { +func (c *debuginfoServiceClient) MarkUploadFinished(ctx context.Context, in *MarkUploadFinishedRequest, opts ...grpc.CallOption) (*MarkUploadFinishedResponse, error) { + out := new(MarkUploadFinishedResponse) + err := c.cc.Invoke(ctx, "/parca.debuginfo.v1alpha1.DebuginfoService/MarkUploadFinished", in, out, opts...) + if err != nil { return nil, err } - return m, nil + return out, nil } -// DebugInfoServiceServer is the server API for DebugInfoService service. -// All implementations must embed UnimplementedDebugInfoServiceServer +// DebuginfoServiceServer is the server API for DebuginfoService service. +// All implementations must embed UnimplementedDebuginfoServiceServer // for forward compatibility -type DebugInfoServiceServer interface { - // Exists returns true if the given build_id has debug info uploaded for it. 
- Exists(context.Context, *ExistsRequest) (*ExistsResponse, error) +type DebuginfoServiceServer interface { // Upload ingests debug info for a given build_id - Upload(DebugInfoService_UploadServer) error - // Download returns the debug info for a given build_id. - Download(*DownloadRequest, DebugInfoService_DownloadServer) error - mustEmbedUnimplementedDebugInfoServiceServer() + Upload(DebuginfoService_UploadServer) error + // ShouldInitiateUpload returns whether an upload for a given build_id should be initiated or not. + ShouldInitiateUpload(context.Context, *ShouldInitiateUploadRequest) (*ShouldInitiateUploadResponse, error) + // InitiateUpload returns a strategy and information to upload debug info for a given build_id. + InitiateUpload(context.Context, *InitiateUploadRequest) (*InitiateUploadResponse, error) + // MarkUploadFinished marks the upload as finished for a given build_id. + MarkUploadFinished(context.Context, *MarkUploadFinishedRequest) (*MarkUploadFinishedResponse, error) + mustEmbedUnimplementedDebuginfoServiceServer() } -// UnimplementedDebugInfoServiceServer must be embedded to have forward compatible implementations. -type UnimplementedDebugInfoServiceServer struct { +// UnimplementedDebuginfoServiceServer must be embedded to have forward compatible implementations. 
+type UnimplementedDebuginfoServiceServer struct { } -func (UnimplementedDebugInfoServiceServer) Exists(context.Context, *ExistsRequest) (*ExistsResponse, error) { - return nil, status.Errorf(codes.Unimplemented, "method Exists not implemented") -} -func (UnimplementedDebugInfoServiceServer) Upload(DebugInfoService_UploadServer) error { +func (UnimplementedDebuginfoServiceServer) Upload(DebuginfoService_UploadServer) error { return status.Errorf(codes.Unimplemented, "method Upload not implemented") } -func (UnimplementedDebugInfoServiceServer) Download(*DownloadRequest, DebugInfoService_DownloadServer) error { - return status.Errorf(codes.Unimplemented, "method Download not implemented") +func (UnimplementedDebuginfoServiceServer) ShouldInitiateUpload(context.Context, *ShouldInitiateUploadRequest) (*ShouldInitiateUploadResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method ShouldInitiateUpload not implemented") } -func (UnimplementedDebugInfoServiceServer) mustEmbedUnimplementedDebugInfoServiceServer() {} - -// UnsafeDebugInfoServiceServer may be embedded to opt out of forward compatibility for this service. -// Use of this interface is not recommended, as added methods to DebugInfoServiceServer will -// result in compilation errors. 
-type UnsafeDebugInfoServiceServer interface { - mustEmbedUnimplementedDebugInfoServiceServer() +func (UnimplementedDebuginfoServiceServer) InitiateUpload(context.Context, *InitiateUploadRequest) (*InitiateUploadResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method InitiateUpload not implemented") } +func (UnimplementedDebuginfoServiceServer) MarkUploadFinished(context.Context, *MarkUploadFinishedRequest) (*MarkUploadFinishedResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method MarkUploadFinished not implemented") +} +func (UnimplementedDebuginfoServiceServer) mustEmbedUnimplementedDebuginfoServiceServer() {} -func RegisterDebugInfoServiceServer(s grpc.ServiceRegistrar, srv DebugInfoServiceServer) { - s.RegisterService(&DebugInfoService_ServiceDesc, srv) +// UnsafeDebuginfoServiceServer may be embedded to opt out of forward compatibility for this service. +// Use of this interface is not recommended, as added methods to DebuginfoServiceServer will +// result in compilation errors. 
+type UnsafeDebuginfoServiceServer interface { + mustEmbedUnimplementedDebuginfoServiceServer() } -func _DebugInfoService_Exists_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(ExistsRequest) - if err := dec(in); err != nil { - return nil, err - } - if interceptor == nil { - return srv.(DebugInfoServiceServer).Exists(ctx, in) - } - info := &grpc.UnaryServerInfo{ - Server: srv, - FullMethod: "/parca.debuginfo.v1alpha1.DebugInfoService/Exists", - } - handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(DebugInfoServiceServer).Exists(ctx, req.(*ExistsRequest)) - } - return interceptor(ctx, in, info, handler) +func RegisterDebuginfoServiceServer(s grpc.ServiceRegistrar, srv DebuginfoServiceServer) { + s.RegisterService(&DebuginfoService_ServiceDesc, srv) } -func _DebugInfoService_Upload_Handler(srv interface{}, stream grpc.ServerStream) error { - return srv.(DebugInfoServiceServer).Upload(&debugInfoServiceUploadServer{stream}) +func _DebuginfoService_Upload_Handler(srv interface{}, stream grpc.ServerStream) error { + return srv.(DebuginfoServiceServer).Upload(&debuginfoServiceUploadServer{stream}) } -type DebugInfoService_UploadServer interface { +type DebuginfoService_UploadServer interface { SendAndClose(*UploadResponse) error Recv() (*UploadRequest, error) grpc.ServerStream } -type debugInfoServiceUploadServer struct { +type debuginfoServiceUploadServer struct { grpc.ServerStream } -func (x *debugInfoServiceUploadServer) SendAndClose(m *UploadResponse) error { +func (x *debuginfoServiceUploadServer) SendAndClose(m *UploadResponse) error { return x.ServerStream.SendMsg(m) } -func (x *debugInfoServiceUploadServer) Recv() (*UploadRequest, error) { +func (x *debuginfoServiceUploadServer) Recv() (*UploadRequest, error) { m := new(UploadRequest) if err := x.ServerStream.RecvMsg(m); err != nil { return nil, err @@ -205,55 +182,91 @@ 
func (x *debugInfoServiceUploadServer) Recv() (*UploadRequest, error) { return m, nil } -func _DebugInfoService_Download_Handler(srv interface{}, stream grpc.ServerStream) error { - m := new(DownloadRequest) - if err := stream.RecvMsg(m); err != nil { - return err +func _DebuginfoService_ShouldInitiateUpload_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(ShouldInitiateUploadRequest) + if err := dec(in); err != nil { + return nil, err } - return srv.(DebugInfoServiceServer).Download(m, &debugInfoServiceDownloadServer{stream}) -} - -type DebugInfoService_DownloadServer interface { - Send(*DownloadResponse) error - grpc.ServerStream + if interceptor == nil { + return srv.(DebuginfoServiceServer).ShouldInitiateUpload(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/parca.debuginfo.v1alpha1.DebuginfoService/ShouldInitiateUpload", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(DebuginfoServiceServer).ShouldInitiateUpload(ctx, req.(*ShouldInitiateUploadRequest)) + } + return interceptor(ctx, in, info, handler) } -type debugInfoServiceDownloadServer struct { - grpc.ServerStream +func _DebuginfoService_InitiateUpload_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(InitiateUploadRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(DebuginfoServiceServer).InitiateUpload(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/parca.debuginfo.v1alpha1.DebuginfoService/InitiateUpload", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(DebuginfoServiceServer).InitiateUpload(ctx, req.(*InitiateUploadRequest)) + } + return interceptor(ctx, in, info, handler) } -func (x 
*debugInfoServiceDownloadServer) Send(m *DownloadResponse) error { - return x.ServerStream.SendMsg(m) +func _DebuginfoService_MarkUploadFinished_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(MarkUploadFinishedRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(DebuginfoServiceServer).MarkUploadFinished(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/parca.debuginfo.v1alpha1.DebuginfoService/MarkUploadFinished", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(DebuginfoServiceServer).MarkUploadFinished(ctx, req.(*MarkUploadFinishedRequest)) + } + return interceptor(ctx, in, info, handler) } -// DebugInfoService_ServiceDesc is the grpc.ServiceDesc for DebugInfoService service. +// DebuginfoService_ServiceDesc is the grpc.ServiceDesc for DebuginfoService service. 
// It's only intended for direct use with grpc.RegisterService, // and not to be introspected or modified (even as a copy) -var DebugInfoService_ServiceDesc = grpc.ServiceDesc{ - ServiceName: "parca.debuginfo.v1alpha1.DebugInfoService", - HandlerType: (*DebugInfoServiceServer)(nil), +var DebuginfoService_ServiceDesc = grpc.ServiceDesc{ + ServiceName: "parca.debuginfo.v1alpha1.DebuginfoService", + HandlerType: (*DebuginfoServiceServer)(nil), Methods: []grpc.MethodDesc{ { - MethodName: "Exists", - Handler: _DebugInfoService_Exists_Handler, + MethodName: "ShouldInitiateUpload", + Handler: _DebuginfoService_ShouldInitiateUpload_Handler, + }, + { + MethodName: "InitiateUpload", + Handler: _DebuginfoService_InitiateUpload_Handler, + }, + { + MethodName: "MarkUploadFinished", + Handler: _DebuginfoService_MarkUploadFinished_Handler, }, }, Streams: []grpc.StreamDesc{ { StreamName: "Upload", - Handler: _DebugInfoService_Upload_Handler, + Handler: _DebuginfoService_Upload_Handler, ClientStreams: true, }, - { - StreamName: "Download", - Handler: _DebugInfoService_Download_Handler, - ServerStreams: true, - }, }, Metadata: "parca/debuginfo/v1alpha1/debuginfo.proto", } -func (m *ExistsRequest) MarshalVT() (dAtA []byte, err error) { +func (m *ShouldInitiateUploadRequest) MarshalVT() (dAtA []byte, err error) { if m == nil { return nil, nil } @@ -266,12 +279,12 @@ func (m *ExistsRequest) MarshalVT() (dAtA []byte, err error) { return dAtA[:n], nil } -func (m *ExistsRequest) MarshalToVT(dAtA []byte) (int, error) { +func (m *ShouldInitiateUploadRequest) MarshalToVT(dAtA []byte) (int, error) { size := m.SizeVT() return m.MarshalToSizedBufferVT(dAtA[:size]) } -func (m *ExistsRequest) MarshalToSizedBufferVT(dAtA []byte) (int, error) { +func (m *ShouldInitiateUploadRequest) MarshalToSizedBufferVT(dAtA []byte) (int, error) { if m == nil { return 0, nil } @@ -283,13 +296,6 @@ func (m *ExistsRequest) MarshalToSizedBufferVT(dAtA []byte) (int, error) { i -= len(m.unknownFields) copy(dAtA[i:], 
m.unknownFields) } - if len(m.Hash) > 0 { - i -= len(m.Hash) - copy(dAtA[i:], m.Hash) - i = encodeVarint(dAtA, i, uint64(len(m.Hash))) - i-- - dAtA[i] = 0x12 - } if len(m.BuildId) > 0 { i -= len(m.BuildId) copy(dAtA[i:], m.BuildId) @@ -300,7 +306,7 @@ func (m *ExistsRequest) MarshalToSizedBufferVT(dAtA []byte) (int, error) { return len(dAtA) - i, nil } -func (m *ExistsResponse) MarshalVT() (dAtA []byte, err error) { +func (m *ShouldInitiateUploadResponse) MarshalVT() (dAtA []byte, err error) { if m == nil { return nil, nil } @@ -313,12 +319,12 @@ func (m *ExistsResponse) MarshalVT() (dAtA []byte, err error) { return dAtA[:n], nil } -func (m *ExistsResponse) MarshalToVT(dAtA []byte) (int, error) { +func (m *ShouldInitiateUploadResponse) MarshalToVT(dAtA []byte) (int, error) { size := m.SizeVT() return m.MarshalToSizedBufferVT(dAtA[:size]) } -func (m *ExistsResponse) MarshalToSizedBufferVT(dAtA []byte) (int, error) { +func (m *ShouldInitiateUploadResponse) MarshalToSizedBufferVT(dAtA []byte) (int, error) { if m == nil { return 0, nil } @@ -330,9 +336,9 @@ func (m *ExistsResponse) MarshalToSizedBufferVT(dAtA []byte) (int, error) { i -= len(m.unknownFields) copy(dAtA[i:], m.unknownFields) } - if m.Exists { + if m.ShouldInitiateUpload { i-- - if m.Exists { + if m.ShouldInitiateUpload { dAtA[i] = 1 } else { dAtA[i] = 0 @@ -343,7 +349,7 @@ func (m *ExistsResponse) MarshalToSizedBufferVT(dAtA []byte) (int, error) { return len(dAtA) - i, nil } -func (m *UploadRequest) MarshalVT() (dAtA []byte, err error) { +func (m *InitiateUploadRequest) MarshalVT() (dAtA []byte, err error) { if m == nil { return nil, nil } @@ -356,12 +362,12 @@ func (m *UploadRequest) MarshalVT() (dAtA []byte, err error) { return dAtA[:n], nil } -func (m *UploadRequest) MarshalToVT(dAtA []byte) (int, error) { +func (m *InitiateUploadRequest) MarshalToVT(dAtA []byte) (int, error) { size := m.SizeVT() return m.MarshalToSizedBufferVT(dAtA[:size]) } -func (m *UploadRequest) MarshalToSizedBufferVT(dAtA []byte) 
(int, error) { +func (m *InitiateUploadRequest) MarshalToSizedBufferVT(dAtA []byte) (int, error) { if m == nil { return 0, nil } @@ -373,55 +379,22 @@ func (m *UploadRequest) MarshalToSizedBufferVT(dAtA []byte) (int, error) { i -= len(m.unknownFields) copy(dAtA[i:], m.unknownFields) } - if vtmsg, ok := m.Data.(interface { - MarshalToVT([]byte) (int, error) - SizeVT() int - }); ok { - { - size := vtmsg.SizeVT() - i -= size - if _, err := vtmsg.MarshalToVT(dAtA[i:]); err != nil { - return 0, err - } - } + if m.Size != 0 { + i = encodeVarint(dAtA, i, uint64(m.Size)) + i-- + dAtA[i] = 0x10 } - return len(dAtA) - i, nil -} - -func (m *UploadRequest_Info) MarshalToVT(dAtA []byte) (int, error) { - size := m.SizeVT() - return m.MarshalToSizedBufferVT(dAtA[:size]) -} - -func (m *UploadRequest_Info) MarshalToSizedBufferVT(dAtA []byte) (int, error) { - i := len(dAtA) - if m.Info != nil { - size, err := m.Info.MarshalToSizedBufferVT(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarint(dAtA, i, uint64(size)) + if len(m.BuildId) > 0 { + i -= len(m.BuildId) + copy(dAtA[i:], m.BuildId) + i = encodeVarint(dAtA, i, uint64(len(m.BuildId))) i-- dAtA[i] = 0xa } return len(dAtA) - i, nil } -func (m *UploadRequest_ChunkData) MarshalToVT(dAtA []byte) (int, error) { - size := m.SizeVT() - return m.MarshalToSizedBufferVT(dAtA[:size]) -} -func (m *UploadRequest_ChunkData) MarshalToSizedBufferVT(dAtA []byte) (int, error) { - i := len(dAtA) - i -= len(m.ChunkData) - copy(dAtA[i:], m.ChunkData) - i = encodeVarint(dAtA, i, uint64(len(m.ChunkData))) - i-- - dAtA[i] = 0x12 - return len(dAtA) - i, nil -} -func (m *UploadInfo) MarshalVT() (dAtA []byte, err error) { +func (m *InitiateUploadResponse) MarshalVT() (dAtA []byte, err error) { if m == nil { return nil, nil } @@ -434,12 +407,12 @@ func (m *UploadInfo) MarshalVT() (dAtA []byte, err error) { return dAtA[:n], nil } -func (m *UploadInfo) MarshalToVT(dAtA []byte) (int, error) { +func (m *InitiateUploadResponse) 
MarshalToVT(dAtA []byte) (int, error) { size := m.SizeVT() return m.MarshalToSizedBufferVT(dAtA[:size]) } -func (m *UploadInfo) MarshalToSizedBufferVT(dAtA []byte) (int, error) { +func (m *InitiateUploadResponse) MarshalToSizedBufferVT(dAtA []byte) (int, error) { if m == nil { return 0, nil } @@ -451,24 +424,29 @@ func (m *UploadInfo) MarshalToSizedBufferVT(dAtA []byte) (int, error) { i -= len(m.unknownFields) copy(dAtA[i:], m.unknownFields) } - if len(m.Hash) > 0 { - i -= len(m.Hash) - copy(dAtA[i:], m.Hash) - i = encodeVarint(dAtA, i, uint64(len(m.Hash))) + if len(m.SignedUrl) > 0 { + i -= len(m.SignedUrl) + copy(dAtA[i:], m.SignedUrl) + i = encodeVarint(dAtA, i, uint64(len(m.SignedUrl))) i-- - dAtA[i] = 0x12 + dAtA[i] = 0x1a } - if len(m.BuildId) > 0 { - i -= len(m.BuildId) - copy(dAtA[i:], m.BuildId) - i = encodeVarint(dAtA, i, uint64(len(m.BuildId))) + if m.UploadStrategy != 0 { + i = encodeVarint(dAtA, i, uint64(m.UploadStrategy)) + i-- + dAtA[i] = 0x10 + } + if len(m.UploadId) > 0 { + i -= len(m.UploadId) + copy(dAtA[i:], m.UploadId) + i = encodeVarint(dAtA, i, uint64(len(m.UploadId))) i-- dAtA[i] = 0xa } return len(dAtA) - i, nil } -func (m *UploadResponse) MarshalVT() (dAtA []byte, err error) { +func (m *MarkUploadFinishedRequest) MarshalVT() (dAtA []byte, err error) { if m == nil { return nil, nil } @@ -481,12 +459,12 @@ func (m *UploadResponse) MarshalVT() (dAtA []byte, err error) { return dAtA[:n], nil } -func (m *UploadResponse) MarshalToVT(dAtA []byte) (int, error) { +func (m *MarkUploadFinishedRequest) MarshalToVT(dAtA []byte) (int, error) { size := m.SizeVT() return m.MarshalToSizedBufferVT(dAtA[:size]) } -func (m *UploadResponse) MarshalToSizedBufferVT(dAtA []byte) (int, error) { +func (m *MarkUploadFinishedRequest) MarshalToSizedBufferVT(dAtA []byte) (int, error) { if m == nil { return 0, nil } @@ -498,10 +476,12 @@ func (m *UploadResponse) MarshalToSizedBufferVT(dAtA []byte) (int, error) { i -= len(m.unknownFields) copy(dAtA[i:], m.unknownFields) 
} - if m.Size != 0 { - i = encodeVarint(dAtA, i, uint64(m.Size)) + if len(m.UploadId) > 0 { + i -= len(m.UploadId) + copy(dAtA[i:], m.UploadId) + i = encodeVarint(dAtA, i, uint64(len(m.UploadId))) i-- - dAtA[i] = 0x10 + dAtA[i] = 0x12 } if len(m.BuildId) > 0 { i -= len(m.BuildId) @@ -513,7 +493,7 @@ func (m *UploadResponse) MarshalToSizedBufferVT(dAtA []byte) (int, error) { return len(dAtA) - i, nil } -func (m *DownloadRequest) MarshalVT() (dAtA []byte, err error) { +func (m *MarkUploadFinishedResponse) MarshalVT() (dAtA []byte, err error) { if m == nil { return nil, nil } @@ -526,12 +506,12 @@ func (m *DownloadRequest) MarshalVT() (dAtA []byte, err error) { return dAtA[:n], nil } -func (m *DownloadRequest) MarshalToVT(dAtA []byte) (int, error) { +func (m *MarkUploadFinishedResponse) MarshalToVT(dAtA []byte) (int, error) { size := m.SizeVT() return m.MarshalToSizedBufferVT(dAtA[:size]) } -func (m *DownloadRequest) MarshalToSizedBufferVT(dAtA []byte) (int, error) { +func (m *MarkUploadFinishedResponse) MarshalToSizedBufferVT(dAtA []byte) (int, error) { if m == nil { return 0, nil } @@ -543,17 +523,10 @@ func (m *DownloadRequest) MarshalToSizedBufferVT(dAtA []byte) (int, error) { i -= len(m.unknownFields) copy(dAtA[i:], m.unknownFields) } - if len(m.BuildId) > 0 { - i -= len(m.BuildId) - copy(dAtA[i:], m.BuildId) - i = encodeVarint(dAtA, i, uint64(len(m.BuildId))) - i-- - dAtA[i] = 0xa - } return len(dAtA) - i, nil } -func (m *DownloadResponse) MarshalVT() (dAtA []byte, err error) { +func (m *UploadRequest) MarshalVT() (dAtA []byte, err error) { if m == nil { return nil, nil } @@ -566,12 +539,12 @@ func (m *DownloadResponse) MarshalVT() (dAtA []byte, err error) { return dAtA[:n], nil } -func (m *DownloadResponse) MarshalToVT(dAtA []byte) (int, error) { +func (m *UploadRequest) MarshalToVT(dAtA []byte) (int, error) { size := m.SizeVT() return m.MarshalToSizedBufferVT(dAtA[:size]) } -func (m *DownloadResponse) MarshalToSizedBufferVT(dAtA []byte) (int, error) { +func (m 
*UploadRequest) MarshalToSizedBufferVT(dAtA []byte) (int, error) { if m == nil { return 0, nil } @@ -598,12 +571,12 @@ func (m *DownloadResponse) MarshalToSizedBufferVT(dAtA []byte) (int, error) { return len(dAtA) - i, nil } -func (m *DownloadResponse_Info) MarshalToVT(dAtA []byte) (int, error) { +func (m *UploadRequest_Info) MarshalToVT(dAtA []byte) (int, error) { size := m.SizeVT() return m.MarshalToSizedBufferVT(dAtA[:size]) } -func (m *DownloadResponse_Info) MarshalToSizedBufferVT(dAtA []byte) (int, error) { +func (m *UploadRequest_Info) MarshalToSizedBufferVT(dAtA []byte) (int, error) { i := len(dAtA) if m.Info != nil { size, err := m.Info.MarshalToSizedBufferVT(dAtA[:i]) @@ -617,12 +590,12 @@ func (m *DownloadResponse_Info) MarshalToSizedBufferVT(dAtA []byte) (int, error) } return len(dAtA) - i, nil } -func (m *DownloadResponse_ChunkData) MarshalToVT(dAtA []byte) (int, error) { +func (m *UploadRequest_ChunkData) MarshalToVT(dAtA []byte) (int, error) { size := m.SizeVT() return m.MarshalToSizedBufferVT(dAtA[:size]) } -func (m *DownloadResponse_ChunkData) MarshalToSizedBufferVT(dAtA []byte) (int, error) { +func (m *UploadRequest_ChunkData) MarshalToSizedBufferVT(dAtA []byte) (int, error) { i := len(dAtA) i -= len(m.ChunkData) copy(dAtA[i:], m.ChunkData) @@ -631,7 +604,7 @@ func (m *DownloadResponse_ChunkData) MarshalToSizedBufferVT(dAtA []byte) (int, e dAtA[i] = 0x12 return len(dAtA) - i, nil } -func (m *DownloadInfo) MarshalVT() (dAtA []byte, err error) { +func (m *UploadInfo) MarshalVT() (dAtA []byte, err error) { if m == nil { return nil, nil } @@ -644,12 +617,12 @@ func (m *DownloadInfo) MarshalVT() (dAtA []byte, err error) { return dAtA[:n], nil } -func (m *DownloadInfo) MarshalToVT(dAtA []byte) (int, error) { +func (m *UploadInfo) MarshalToVT(dAtA []byte) (int, error) { size := m.SizeVT() return m.MarshalToSizedBufferVT(dAtA[:size]) } -func (m *DownloadInfo) MarshalToSizedBufferVT(dAtA []byte) (int, error) { +func (m *UploadInfo) 
MarshalToSizedBufferVT(dAtA []byte) (int, error) { if m == nil { return 0, nil } @@ -661,64 +634,385 @@ func (m *DownloadInfo) MarshalToSizedBufferVT(dAtA []byte) (int, error) { i -= len(m.unknownFields) copy(dAtA[i:], m.unknownFields) } - if m.Source != 0 { - i = encodeVarint(dAtA, i, uint64(m.Source)) + if len(m.UploadId) > 0 { + i -= len(m.UploadId) + copy(dAtA[i:], m.UploadId) + i = encodeVarint(dAtA, i, uint64(len(m.UploadId))) i-- - dAtA[i] = 0x8 + dAtA[i] = 0x12 + } + if len(m.BuildId) > 0 { + i -= len(m.BuildId) + copy(dAtA[i:], m.BuildId) + i = encodeVarint(dAtA, i, uint64(len(m.BuildId))) + i-- + dAtA[i] = 0xa } return len(dAtA) - i, nil } -func encodeVarint(dAtA []byte, offset int, v uint64) int { - offset -= sov(v) - base := offset - for v >= 1<<7 { - dAtA[offset] = uint8(v&0x7f | 0x80) - v >>= 7 - offset++ +func (m *UploadResponse) MarshalVT() (dAtA []byte, err error) { + if m == nil { + return nil, nil } - dAtA[offset] = uint8(v) - return base + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVT(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *UploadResponse) MarshalToVT(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVT(dAtA[:size]) } -func (m *ExistsRequest) SizeVT() (n int) { + +func (m *UploadResponse) MarshalToSizedBufferVT(dAtA []byte) (int, error) { if m == nil { - return 0 + return 0, nil } + i := len(dAtA) + _ = i var l int _ = l - l = len(m.BuildId) - if l > 0 { - n += 1 + l + sov(uint64(l)) + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) } - l = len(m.Hash) - if l > 0 { - n += 1 + l + sov(uint64(l)) + if m.Size != 0 { + i = encodeVarint(dAtA, i, uint64(m.Size)) + i-- + dAtA[i] = 0x10 } - if m.unknownFields != nil { - n += len(m.unknownFields) + if len(m.BuildId) > 0 { + i -= len(m.BuildId) + copy(dAtA[i:], m.BuildId) + i = encodeVarint(dAtA, i, uint64(len(m.BuildId))) + i-- + dAtA[i] = 0xa } 
- return n + return len(dAtA) - i, nil } -func (m *ExistsResponse) SizeVT() (n int) { +func (m *Debuginfo) MarshalVT() (dAtA []byte, err error) { if m == nil { - return 0 - } - var l int - _ = l - if m.Exists { - n += 2 + return nil, nil } - if m.unknownFields != nil { - n += len(m.unknownFields) + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVT(dAtA[:size]) + if err != nil { + return nil, err } - return n + return dAtA[:n], nil } -func (m *UploadRequest) SizeVT() (n int) { - if m == nil { - return 0 - } +func (m *Debuginfo) MarshalToVT(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVT(dAtA[:size]) +} + +func (m *Debuginfo) MarshalToSizedBufferVT(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if m.Quality != nil { + size, err := m.Quality.MarshalToSizedBufferVT(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x22 + } + if m.Upload != nil { + size, err := m.Upload.MarshalToSizedBufferVT(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x1a + } + if m.Source != 0 { + i = encodeVarint(dAtA, i, uint64(m.Source)) + i-- + dAtA[i] = 0x10 + } + if len(m.BuildId) > 0 { + i -= len(m.BuildId) + copy(dAtA[i:], m.BuildId) + i = encodeVarint(dAtA, i, uint64(len(m.BuildId))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *DebuginfoUpload) MarshalVT() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVT(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *DebuginfoUpload) MarshalToVT(dAtA []byte) (int, error) { + size := m.SizeVT() + return 
m.MarshalToSizedBufferVT(dAtA[:size]) +} + +func (m *DebuginfoUpload) MarshalToSizedBufferVT(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if m.FinishedAt != nil { + if marshalto, ok := interface{}(m.FinishedAt).(interface { + MarshalToSizedBufferVT([]byte) (int, error) + }); ok { + size, err := marshalto.MarshalToSizedBufferVT(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarint(dAtA, i, uint64(size)) + } else { + encoded, err := proto.Marshal(m.FinishedAt) + if err != nil { + return 0, err + } + i -= len(encoded) + copy(dAtA[i:], encoded) + i = encodeVarint(dAtA, i, uint64(len(encoded))) + } + i-- + dAtA[i] = 0x2a + } + if m.StartedAt != nil { + if marshalto, ok := interface{}(m.StartedAt).(interface { + MarshalToSizedBufferVT([]byte) (int, error) + }); ok { + size, err := marshalto.MarshalToSizedBufferVT(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarint(dAtA, i, uint64(size)) + } else { + encoded, err := proto.Marshal(m.StartedAt) + if err != nil { + return 0, err + } + i -= len(encoded) + copy(dAtA[i:], encoded) + i = encodeVarint(dAtA, i, uint64(len(encoded))) + } + i-- + dAtA[i] = 0x22 + } + if m.State != 0 { + i = encodeVarint(dAtA, i, uint64(m.State)) + i-- + dAtA[i] = 0x10 + } + if len(m.Id) > 0 { + i -= len(m.Id) + copy(dAtA[i:], m.Id) + i = encodeVarint(dAtA, i, uint64(len(m.Id))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *DebuginfoQuality) MarshalVT() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVT(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *DebuginfoQuality) MarshalToVT(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVT(dAtA[:size]) +} 
+ +func (m *DebuginfoQuality) MarshalToSizedBufferVT(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if m.NotValidElf { + i-- + if m.NotValidElf { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x8 + } + return len(dAtA) - i, nil +} + +func encodeVarint(dAtA []byte, offset int, v uint64) int { + offset -= sov(v) + base := offset + for v >= 1<<7 { + dAtA[offset] = uint8(v&0x7f | 0x80) + v >>= 7 + offset++ + } + dAtA[offset] = uint8(v) + return base +} +func (m *ShouldInitiateUploadRequest) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.BuildId) + if l > 0 { + n += 1 + l + sov(uint64(l)) + } + if m.unknownFields != nil { + n += len(m.unknownFields) + } + return n +} + +func (m *ShouldInitiateUploadResponse) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.ShouldInitiateUpload { + n += 2 + } + if m.unknownFields != nil { + n += len(m.unknownFields) + } + return n +} + +func (m *InitiateUploadRequest) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.BuildId) + if l > 0 { + n += 1 + l + sov(uint64(l)) + } + if m.Size != 0 { + n += 1 + sov(uint64(m.Size)) + } + if m.unknownFields != nil { + n += len(m.unknownFields) + } + return n +} + +func (m *InitiateUploadResponse) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.UploadId) + if l > 0 { + n += 1 + l + sov(uint64(l)) + } + if m.UploadStrategy != 0 { + n += 1 + sov(uint64(m.UploadStrategy)) + } + l = len(m.SignedUrl) + if l > 0 { + n += 1 + l + sov(uint64(l)) + } + if m.unknownFields != nil { + n += len(m.unknownFields) + } + return n +} + +func (m *MarkUploadFinishedRequest) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.BuildId) + if l > 0 { + n += 1 + l + sov(uint64(l)) + } + l = 
len(m.UploadId) + if l > 0 { + n += 1 + l + sov(uint64(l)) + } + if m.unknownFields != nil { + n += len(m.unknownFields) + } + return n +} + +func (m *MarkUploadFinishedResponse) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.unknownFields != nil { + n += len(m.unknownFields) + } + return n +} + +func (m *UploadRequest) SizeVT() (n int) { + if m == nil { + return 0 + } var l int _ = l if vtmsg, ok := m.Data.(interface{ SizeVT() int }); ok { @@ -762,7 +1056,7 @@ func (m *UploadInfo) SizeVT() (n int) { if l > 0 { n += 1 + l + sov(uint64(l)) } - l = len(m.Hash) + l = len(m.UploadId) if l > 0 { n += 1 + l + sov(uint64(l)) } @@ -791,7 +1085,7 @@ func (m *UploadResponse) SizeVT() (n int) { return n } -func (m *DownloadRequest) SizeVT() (n int) { +func (m *Debuginfo) SizeVT() (n int) { if m == nil { return 0 } @@ -801,71 +1095,474 @@ func (m *DownloadRequest) SizeVT() (n int) { if l > 0 { n += 1 + l + sov(uint64(l)) } + if m.Source != 0 { + n += 1 + sov(uint64(m.Source)) + } + if m.Upload != nil { + l = m.Upload.SizeVT() + n += 1 + l + sov(uint64(l)) + } + if m.Quality != nil { + l = m.Quality.SizeVT() + n += 1 + l + sov(uint64(l)) + } if m.unknownFields != nil { n += len(m.unknownFields) } return n } -func (m *DownloadResponse) SizeVT() (n int) { +func (m *DebuginfoUpload) SizeVT() (n int) { if m == nil { return 0 } var l int _ = l - if vtmsg, ok := m.Data.(interface{ SizeVT() int }); ok { - n += vtmsg.SizeVT() + l = len(m.Id) + if l > 0 { + n += 1 + l + sov(uint64(l)) + } + if m.State != 0 { + n += 1 + sov(uint64(m.State)) + } + if m.StartedAt != nil { + if size, ok := interface{}(m.StartedAt).(interface { + SizeVT() int + }); ok { + l = size.SizeVT() + } else { + l = proto.Size(m.StartedAt) + } + n += 1 + l + sov(uint64(l)) + } + if m.FinishedAt != nil { + if size, ok := interface{}(m.FinishedAt).(interface { + SizeVT() int + }); ok { + l = size.SizeVT() + } else { + l = proto.Size(m.FinishedAt) + } + n += 1 + l + sov(uint64(l)) } if 
m.unknownFields != nil { n += len(m.unknownFields) } - return n -} + return n +} + +func (m *DebuginfoQuality) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.NotValidElf { + n += 2 + } + if m.unknownFields != nil { + n += len(m.unknownFields) + } + return n +} + +func sov(x uint64) (n int) { + return (bits.Len64(x|1) + 6) / 7 +} +func soz(x uint64) (n int) { + return sov(uint64((x << 1) ^ uint64((int64(x) >> 63)))) +} +func (m *ShouldInitiateUploadRequest) UnmarshalVT(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ShouldInitiateUploadRequest: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ShouldInitiateUploadRequest: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field BuildId", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLength + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLength + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.BuildId = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skip(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return 
ErrInvalidLength + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.unknownFields = append(m.unknownFields, dAtA[iNdEx:iNdEx+skippy]...) + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ShouldInitiateUploadResponse) UnmarshalVT(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ShouldInitiateUploadResponse: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ShouldInitiateUploadResponse: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field ShouldInitiateUpload", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.ShouldInitiateUpload = bool(v != 0) + default: + iNdEx = preIndex + skippy, err := skip(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLength + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.unknownFields = append(m.unknownFields, dAtA[iNdEx:iNdEx+skippy]...) 
+ iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *InitiateUploadRequest) UnmarshalVT(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: InitiateUploadRequest: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: InitiateUploadRequest: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field BuildId", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLength + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLength + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.BuildId = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Size", wireType) + } + m.Size = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Size |= int64(b&0x7F) << shift + if b < 0x80 { + break + } + } + default: + iNdEx = preIndex + skippy, err := skip(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLength + } + if (iNdEx 
+ skippy) > l { + return io.ErrUnexpectedEOF + } + m.unknownFields = append(m.unknownFields, dAtA[iNdEx:iNdEx+skippy]...) + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *InitiateUploadResponse) UnmarshalVT(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: InitiateUploadResponse: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: InitiateUploadResponse: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field UploadId", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLength + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLength + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.UploadId = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field UploadStrategy", wireType) + } + m.UploadStrategy = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.UploadStrategy |= InitiateUploadResponse_UploadStrategy(b&0x7F) << shift + if b < 0x80 { + break + } 
+ } + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field SignedUrl", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLength + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLength + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.SignedUrl = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skip(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLength + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.unknownFields = append(m.unknownFields, dAtA[iNdEx:iNdEx+skippy]...) + iNdEx += skippy + } + } -func (m *DownloadResponse_Info) SizeVT() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - if m.Info != nil { - l = m.Info.SizeVT() - n += 1 + l + sov(uint64(l)) - } - return n -} -func (m *DownloadResponse_ChunkData) SizeVT() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - l = len(m.ChunkData) - n += 1 + l + sov(uint64(l)) - return n -} -func (m *DownloadInfo) SizeVT() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - if m.Source != 0 { - n += 1 + sov(uint64(m.Source)) - } - if m.unknownFields != nil { - n += len(m.unknownFields) + if iNdEx > l { + return io.ErrUnexpectedEOF } - return n -} - -func sov(x uint64) (n int) { - return (bits.Len64(x|1) + 6) / 7 -} -func soz(x uint64) (n int) { - return sov(uint64((x << 1) ^ uint64((int64(x) >> 63)))) + return nil } -func (m *ExistsRequest) UnmarshalVT(dAtA []byte) error { +func (m *MarkUploadFinishedRequest) UnmarshalVT(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ 
-888,10 +1585,10 @@ func (m *ExistsRequest) UnmarshalVT(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: ExistsRequest: wiretype end group for non-group") + return fmt.Errorf("proto: MarkUploadFinishedRequest: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: ExistsRequest: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: MarkUploadFinishedRequest: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: @@ -928,7 +1625,7 @@ func (m *ExistsRequest) UnmarshalVT(dAtA []byte) error { iNdEx = postIndex case 2: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Hash", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field UploadId", wireType) } var stringLen uint64 for shift := uint(0); ; shift += 7 { @@ -956,7 +1653,7 @@ func (m *ExistsRequest) UnmarshalVT(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.Hash = string(dAtA[iNdEx:postIndex]) + m.UploadId = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex default: iNdEx = preIndex @@ -980,7 +1677,7 @@ func (m *ExistsRequest) UnmarshalVT(dAtA []byte) error { } return nil } -func (m *ExistsResponse) UnmarshalVT(dAtA []byte) error { +func (m *MarkUploadFinishedResponse) UnmarshalVT(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -1003,32 +1700,12 @@ func (m *ExistsResponse) UnmarshalVT(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: ExistsResponse: wiretype end group for non-group") + return fmt.Errorf("proto: MarkUploadFinishedResponse: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: ExistsResponse: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: MarkUploadFinishedResponse: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { - case 1: - 
if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field Exists", wireType) - } - var v int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflow - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - v |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - m.Exists = bool(v != 0) default: iNdEx = preIndex skippy, err := skip(dAtA[iNdEx:]) @@ -1239,7 +1916,7 @@ func (m *UploadInfo) UnmarshalVT(dAtA []byte) error { iNdEx = postIndex case 2: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Hash", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field UploadId", wireType) } var stringLen uint64 for shift := uint(0); ; shift += 7 { @@ -1267,7 +1944,7 @@ func (m *UploadInfo) UnmarshalVT(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.Hash = string(dAtA[iNdEx:postIndex]) + m.UploadId = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex default: iNdEx = preIndex @@ -1393,7 +2070,7 @@ func (m *UploadResponse) UnmarshalVT(dAtA []byte) error { } return nil } -func (m *DownloadRequest) UnmarshalVT(dAtA []byte) error { +func (m *Debuginfo) UnmarshalVT(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -1416,10 +2093,10 @@ func (m *DownloadRequest) UnmarshalVT(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: DownloadRequest: wiretype end group for non-group") + return fmt.Errorf("proto: Debuginfo: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: DownloadRequest: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: Debuginfo: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: @@ -1454,6 +2131,97 @@ func (m *DownloadRequest) UnmarshalVT(dAtA []byte) error { } m.BuildId = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex + case 2: + if wireType != 0 { + return 
fmt.Errorf("proto: wrong wireType = %d for field Source", wireType) + } + m.Source = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Source |= Debuginfo_Source(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Upload", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLength + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLength + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Upload == nil { + m.Upload = &DebuginfoUpload{} + } + if err := m.Upload.UnmarshalVT(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Quality", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLength + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLength + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Quality == nil { + m.Quality = &DebuginfoQuality{} + } + if err := m.Quality.UnmarshalVT(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex default: iNdEx = preIndex skippy, err := skip(dAtA[iNdEx:]) @@ -1476,7 +2244,7 @@ func (m *DownloadRequest) UnmarshalVT(dAtA []byte) error { } return nil } -func (m *DownloadResponse) UnmarshalVT(dAtA []byte) error { +func (m *DebuginfoUpload) 
UnmarshalVT(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -1499,15 +2267,66 @@ func (m *DownloadResponse) UnmarshalVT(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: DownloadResponse: wiretype end group for non-group") + return fmt.Errorf("proto: DebuginfoUpload: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: DownloadResponse: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: DebuginfoUpload: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Info", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Id", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLength + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLength + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Id = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field State", wireType) + } + m.State = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.State |= DebuginfoUpload_State(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field StartedAt", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -1534,23 +2353,26 @@ func (m *DownloadResponse) UnmarshalVT(dAtA []byte) error { if postIndex > l { return 
io.ErrUnexpectedEOF } - if oneof, ok := m.Data.(*DownloadResponse_Info); ok { - if err := oneof.Info.UnmarshalVT(dAtA[iNdEx:postIndex]); err != nil { + if m.StartedAt == nil { + m.StartedAt = &timestamppb.Timestamp{} + } + if unmarshal, ok := interface{}(m.StartedAt).(interface { + UnmarshalVT([]byte) error + }); ok { + if err := unmarshal.UnmarshalVT(dAtA[iNdEx:postIndex]); err != nil { return err } } else { - v := &DownloadInfo{} - if err := v.UnmarshalVT(dAtA[iNdEx:postIndex]); err != nil { + if err := proto.Unmarshal(dAtA[iNdEx:postIndex], m.StartedAt); err != nil { return err } - m.Data = &DownloadResponse_Info{v} } iNdEx = postIndex - case 2: + case 5: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ChunkData", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field FinishedAt", wireType) } - var byteLen int + var msglen int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflow @@ -1560,24 +2382,35 @@ func (m *DownloadResponse) UnmarshalVT(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - byteLen |= int(b&0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } } - if byteLen < 0 { + if msglen < 0 { return ErrInvalidLength } - postIndex := iNdEx + byteLen + postIndex := iNdEx + msglen if postIndex < 0 { return ErrInvalidLength } if postIndex > l { return io.ErrUnexpectedEOF } - v := make([]byte, postIndex-iNdEx) - copy(v, dAtA[iNdEx:postIndex]) - m.Data = &DownloadResponse_ChunkData{v} + if m.FinishedAt == nil { + m.FinishedAt = &timestamppb.Timestamp{} + } + if unmarshal, ok := interface{}(m.FinishedAt).(interface { + UnmarshalVT([]byte) error + }); ok { + if err := unmarshal.UnmarshalVT(dAtA[iNdEx:postIndex]); err != nil { + return err + } + } else { + if err := proto.Unmarshal(dAtA[iNdEx:postIndex], m.FinishedAt); err != nil { + return err + } + } iNdEx = postIndex default: iNdEx = preIndex @@ -1601,7 +2434,7 @@ func (m *DownloadResponse) UnmarshalVT(dAtA []byte) error { } return nil } -func 
(m *DownloadInfo) UnmarshalVT(dAtA []byte) error { +func (m *DebuginfoQuality) UnmarshalVT(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -1624,17 +2457,17 @@ func (m *DownloadInfo) UnmarshalVT(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: DownloadInfo: wiretype end group for non-group") + return fmt.Errorf("proto: DebuginfoQuality: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: DownloadInfo: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: DebuginfoQuality: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field Source", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field NotValidElf", wireType) } - m.Source = 0 + var v int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflow @@ -1644,11 +2477,12 @@ func (m *DownloadInfo) UnmarshalVT(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - m.Source |= DownloadInfo_Source(b&0x7F) << shift + v |= int(b&0x7F) << shift if b < 0x80 { break } } + m.NotValidElf = bool(v != 0) default: iNdEx = preIndex skippy, err := skip(dAtA[iNdEx:]) diff --git a/gen/proto/swagger/parca/debuginfo/v1alpha1/debuginfo.swagger.json b/gen/proto/swagger/parca/debuginfo/v1alpha1/debuginfo.swagger.json index 34ce482af64..75e950a8895 100644 --- a/gen/proto/swagger/parca/debuginfo/v1alpha1/debuginfo.swagger.json +++ b/gen/proto/swagger/parca/debuginfo/v1alpha1/debuginfo.swagger.json @@ -6,7 +6,7 @@ }, "tags": [ { - "name": "DebugInfoService" + "name": "DebuginfoService" } ], "consumes": [ @@ -17,15 +17,15 @@ ], "paths": {}, "definitions": { - "DownloadInfoSource": { + "InitiateUploadResponseUploadStrategy": { "type": "string", "enum": [ - "SOURCE_UNKNOWN_UNSPECIFIED", - "SOURCE_UPLOAD", - "SOURCE_DEBUGINFOD" + "UPLOAD_STRATEGY_UNSPECIFIED", + 
"UPLOAD_STRATEGY_GRPC", + "UPLOAD_STRATEGY_SIGNED_URL" ], - "default": "SOURCE_UNKNOWN_UNSPECIFIED", - "description": "Source enum describes the source a debuginfo is from.\n\n - SOURCE_UNKNOWN_UNSPECIFIED: To understand when no source is set we have the unknown source.\n - SOURCE_UPLOAD: The debuginfo was uploaded by a user/agent.\n - SOURCE_DEBUGINFOD: The debuginfo was downloaded from a public debuginfod server." + "default": "UPLOAD_STRATEGY_UNSPECIFIED", + "description": "The strategy to use for uploading.\n\n - UPLOAD_STRATEGY_UNSPECIFIED: The upload is not allowed.\n - UPLOAD_STRATEGY_GRPC: The upload is allowed and should be done via the Upload RPC.\n - UPLOAD_STRATEGY_SIGNED_URL: The upload is allowed and should be done via a returned signed URL." }, "parcadebuginfov1alpha1UploadResponse": { "type": "object", @@ -69,40 +69,37 @@ } } }, - "v1alpha1DownloadInfo": { + "v1alpha1InitiateUploadResponse": { "type": "object", "properties": { - "source": { - "$ref": "#/definitions/DownloadInfoSource", - "description": "Source indicates the origin of the debuginfo being downloaded." - } - }, - "description": "DownloadInfo metadata for the debug data that is being downloaded." - }, - "v1alpha1DownloadResponse": { - "type": "object", - "properties": { - "info": { - "$ref": "#/definitions/v1alpha1DownloadInfo", - "title": "info is the metadata for the debug info" + "uploadId": { + "type": "string", + "description": "The upload_id to use for uploading." + }, + "uploadStrategy": { + "$ref": "#/definitions/InitiateUploadResponseUploadStrategy", + "description": "The strategy to use for uploading." }, - "chunkData": { + "signedUrl": { "type": "string", - "format": "byte", - "title": "chunk_data is the raw bytes of the debug info" + "description": "The signed url to use for uploading using a PUT request when the upload\nstrategy is SIGNED_STRATEGY_URL." } }, - "description": "DownloadRequest returns chunked data of the debuginfo." 
+ "description": "InitiateUploadResponse is the response to an InitiateUploadRequest." + }, + "v1alpha1MarkUploadFinishedResponse": { + "type": "object", + "description": "MarkUploadFinishedResponse is the response to a MarkUploadFinishedRequest." }, - "v1alpha1ExistsResponse": { + "v1alpha1ShouldInitiateUploadResponse": { "type": "object", "properties": { - "exists": { + "shouldInitiateUpload": { "type": "boolean", - "title": "exists indicates if there is debug data present for the given build_id" + "description": "Whether an upload should be initiated or not." } }, - "title": "ExistsResponse returns whether the given build_id has debug info" + "description": "ShouldInitiateUploadResponse is the response for ShouldInitiateUpload." }, "v1alpha1UploadInfo": { "type": "object", @@ -111,9 +108,9 @@ "type": "string", "title": "build_id is a unique identifier for the debug data" }, - "hash": { + "uploadId": { "type": "string", - "title": "hash is the hash of the source file that debug information extracted from" + "title": "upload_id is a unique identifier for the upload" } }, "title": "UploadInfo contains the build_id and other metadata for the debug data" diff --git a/gen/proto/swagger/parca/scrape/v1alpha1/scrape.swagger.json b/gen/proto/swagger/parca/scrape/v1alpha1/scrape.swagger.json index f794e33d502..98b49d606d5 100644 --- a/gen/proto/swagger/parca/scrape/v1alpha1/scrape.swagger.json +++ b/gen/proto/swagger/parca/scrape/v1alpha1/scrape.swagger.json @@ -67,17 +67,6 @@ "description": "- HEALTH_UNKNOWN_UNSPECIFIED: HEALTH_UNKNOWN_UNSPECIFIED unspecified\n - HEALTH_GOOD: HEALTH_GOOD healthy target\n - HEALTH_BAD: HEALTH_BAD unhealthy target", "title": "Health are the possible health values of a target" }, - "TargetsRequestState": { - "type": "string", - "enum": [ - "STATE_ANY_UNSPECIFIED", - "STATE_ACTIVE", - "STATE_DROPPED" - ], - "default": "STATE_ANY_UNSPECIFIED", - "description": "- STATE_ANY_UNSPECIFIED: STATE_ANY_UNSPECIFIED unspecified\n - STATE_ACTIVE: 
STATE_ACTIVE target active state\n - STATE_DROPPED: STATE_DROPPED target dropped state", - "title": "State represents the current state of a target" - }, "profilestorev1alpha1Label": { "type": "object", "properties": { @@ -180,6 +169,17 @@ }, "title": "Targets is a list of targets" }, + "v1alpha1TargetsRequestState": { + "type": "string", + "enum": [ + "STATE_ANY_UNSPECIFIED", + "STATE_ACTIVE", + "STATE_DROPPED" + ], + "default": "STATE_ANY_UNSPECIFIED", + "description": "- STATE_ANY_UNSPECIFIED: STATE_ANY_UNSPECIFIED unspecified\n - STATE_ACTIVE: STATE_ACTIVE target active state\n - STATE_DROPPED: STATE_DROPPED target dropped state", + "title": "State represents the current state of a target" + }, "v1alpha1TargetsResponse": { "type": "object", "properties": { diff --git a/go.mod b/go.mod index ee58c4aba03..b3b1a4f63a4 100644 --- a/go.mod +++ b/go.mod @@ -3,6 +3,7 @@ module github.com/parca-dev/parca go 1.19 require ( + cloud.google.com/go/storage v1.27.0 github.com/alecthomas/kong v0.7.1 github.com/apache/arrow/go/v8 v8.0.1 github.com/cenkalti/backoff/v4 v4.2.0 @@ -18,6 +19,7 @@ require ( github.com/go-kit/log v0.2.1 github.com/go-ozzo/ozzo-validation/v4 v4.3.0 github.com/goburrow/cache v0.1.4 + github.com/golang/protobuf v1.5.2 github.com/google/pprof v0.0.0-20221203041831-ce31453925ec github.com/google/uuid v1.3.0 github.com/grpc-ecosystem/go-grpc-middleware v1.3.0 @@ -31,6 +33,7 @@ require ( github.com/klauspost/compress v1.15.12 github.com/nanmu42/limitio v1.0.0 github.com/oklog/run v1.1.0 + github.com/pkg/errors v0.9.1 github.com/polarsignals/frostdb v0.0.0-20221206153157-a83160fb1ff1 github.com/prometheus/client_golang v1.14.0 github.com/prometheus/common v0.37.0 @@ -45,7 +48,9 @@ require ( go.opentelemetry.io/otel/trace v1.11.1 golang.org/x/exp v0.0.0-20221204150635-6dcec336b2bb golang.org/x/net v0.2.0 + golang.org/x/oauth2 v0.2.0 golang.org/x/sync v0.1.0 + google.golang.org/api v0.103.0 google.golang.org/genproto v0.0.0-20221202195650-67e5cbc046fd 
google.golang.org/grpc v1.51.0 google.golang.org/protobuf v1.28.1 @@ -57,7 +62,6 @@ require ( cloud.google.com/go/compute v1.13.0 // indirect cloud.google.com/go/compute/metadata v0.2.1 // indirect cloud.google.com/go/iam v0.7.0 // indirect - cloud.google.com/go/storage v1.27.0 // indirect github.com/Azure/azure-sdk-for-go v65.0.0+incompatible // indirect github.com/Azure/azure-sdk-for-go/sdk/azcore v1.1.0 // indirect github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.1.0 // indirect @@ -132,7 +136,6 @@ require ( github.com/golang-jwt/jwt/v4 v4.2.0 // indirect github.com/golang/glog v1.0.0 // indirect github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da // indirect - github.com/golang/protobuf v1.5.2 // indirect github.com/golang/snappy v0.0.4 // indirect github.com/google/btree v1.1.2 // indirect github.com/google/flatbuffers v2.0.5+incompatible // indirect @@ -198,7 +201,6 @@ require ( github.com/ovh/go-ovh v1.1.0 // indirect github.com/pierrec/lz4/v4 v4.1.15 // indirect github.com/pkg/browser v0.0.0-20210115035449-ce105d075bb4 // indirect - github.com/pkg/errors v0.9.1 // indirect github.com/pmezard/go-difflib v1.0.0 // indirect github.com/prometheus/client_model v0.3.0 // indirect github.com/prometheus/procfs v0.8.0 // indirect @@ -226,14 +228,12 @@ require ( go.uber.org/goleak v1.2.0 // indirect golang.org/x/crypto v0.1.0 // indirect golang.org/x/mod v0.6.0 // indirect - golang.org/x/oauth2 v0.2.0 // indirect golang.org/x/sys v0.2.0 // indirect golang.org/x/term v0.2.0 // indirect golang.org/x/text v0.4.0 // indirect golang.org/x/time v0.1.0 // indirect golang.org/x/tools v0.2.0 // indirect golang.org/x/xerrors v0.0.0-20220907171357-04be3eba64a2 // indirect - google.golang.org/api v0.103.0 // indirect google.golang.org/appengine v1.6.7 // indirect gopkg.in/inf.v0 v0.9.1 // indirect gopkg.in/ini.v1 v1.66.6 // indirect diff --git a/pkg/config/config.go b/pkg/config/config.go index 955b8005060..e5252332ae9 100644 --- a/pkg/config/config.go +++ 
b/pkg/config/config.go @@ -53,9 +53,13 @@ type ObjectStorage struct { // Validate returns an error if the config is not valid. func (c *Config) Validate() error { - return validation.ValidateStruct(c, - validation.Field(&c.ObjectStorage, validation.Required, Valid), - ) + if err := validation.ValidateStruct(c, + validation.Field(&c.ObjectStorage, validation.Required, ObjectStorageValid), + ); err != nil { + return err + } + + return nil } func trueValue() *bool { diff --git a/pkg/config/validation.go b/pkg/config/validation.go index 7f254b6c891..702a194f084 100644 --- a/pkg/config/validation.go +++ b/pkg/config/validation.go @@ -20,14 +20,14 @@ import ( "github.com/thanos-io/objstore/client" ) -// Valid is the ValidRule. -var Valid = ValidRule{} +// ObjectStorageValid is the ValidRule. +var ObjectStorageValid = ObjectStorageValidRule{} -// ValidRule is a validation rule for the Config. It implements the validation.Rule interface. -type ValidRule struct{} +// ObjectStorageValidRule is a validation rule for the Config. It implements the validation.Rule interface. +type ObjectStorageValidRule struct{} -// Validate returns an error if the config is not valid. -func (v ValidRule) Validate(value interface{}) error { +// ObjectStorageValidate returns an error if the config is not valid. 
+func (v ObjectStorageValidRule) Validate(value interface{}) error { c, ok := value.(*ObjectStorage) if !ok { return errors.New("DebugInfo is invalid") diff --git a/pkg/debuginfo/client.go b/pkg/debuginfo/client.go index 657882ea824..ef62ee9493b 100644 --- a/pkg/debuginfo/client.go +++ b/pkg/debuginfo/client.go @@ -38,40 +38,23 @@ const ( MaxMsgSize = 1024 * 1024 * 64 ) -type Client struct { - c debuginfopb.DebugInfoServiceClient +type UploadClient struct { + debuginfopb.DebuginfoServiceClient } -func NewDebugInfoClient(conn *grpc.ClientConn) *Client { - return &Client{ - c: debuginfopb.NewDebugInfoServiceClient(conn), - } -} - -func (c *Client) Exists(ctx context.Context, buildID, hash string) (bool, error) { - res, err := c.c.Exists(ctx, &debuginfopb.ExistsRequest{ - BuildId: buildID, - Hash: hash, - }) - if err != nil { - return false, err - } - - return res.Exists, nil +func NewDebuginfoClient(client debuginfopb.DebuginfoServiceClient) *UploadClient { + return &UploadClient{client} } -func (c *Client) Upload(ctx context.Context, buildID, hash string, r io.Reader) (uint64, error) { - stream, err := c.c.Upload(ctx, grpc.MaxCallSendMsgSize(MaxMsgSize)) +func (c *UploadClient) Upload(ctx context.Context, info *debuginfopb.UploadInfo, r io.Reader) (uint64, error) { + stream, err := c.DebuginfoServiceClient.Upload(ctx, grpc.MaxCallSendMsgSize(MaxMsgSize)) if err != nil { return 0, fmt.Errorf("initiate upload: %w", err) } err = stream.Send(&debuginfopb.UploadRequest{ Data: &debuginfopb.UploadRequest_Info{ - Info: &debuginfopb.UploadInfo{ - BuildId: buildID, - Hash: hash, - }, + Info: info, }, }) if err != nil { @@ -131,70 +114,6 @@ func (c *Client) Upload(ctx context.Context, buildID, hash string, r io.Reader) return res.Size, nil } -type Downloader struct { - stream debuginfopb.DebugInfoService_DownloadClient - info *debuginfopb.DownloadInfo -} - -func (c *Client) Downloader(ctx context.Context, buildID string) (*Downloader, error) { - stream, err := 
c.c.Download(ctx, &debuginfopb.DownloadRequest{ - BuildId: buildID, - }, grpc.MaxCallRecvMsgSize(MaxMsgSize)) - if err != nil { - return nil, fmt.Errorf("initiate download: %w", err) - } - - res, err := stream.Recv() - if err != nil { - return nil, fmt.Errorf("receive download info: %w", err) - } - - info := res.GetInfo() - if info == nil { - return nil, fmt.Errorf("download info is nil") - } - - return &Downloader{ - stream: stream, - info: info, - }, nil -} - -func (d *Downloader) Info() *debuginfopb.DownloadInfo { - return d.info -} - -func (d *Downloader) Close() error { - // Note that CloseSend does not Recv, therefore is not guaranteed to release all resources - return d.stream.CloseSend() -} - -func (d *Downloader) Download(ctx context.Context, w io.Writer) (int, error) { - bytesWritten := 0 - for { - res, err := d.stream.Recv() - if err != nil { - if errors.Is(err, io.EOF) { - break - } - return bytesWritten, fmt.Errorf("receive next chunk: %w", err) - } - - chunkData := res.GetChunkData() - if chunkData == nil { - return bytesWritten, fmt.Errorf("chunk does not contain data") - } - - n, err := w.Write(chunkData) - if err != nil { - return bytesWritten, fmt.Errorf("write next chunk: %w", err) - } - bytesWritten += n - } - - return bytesWritten, nil -} - // sentinelError checks underlying error for grpc.StatusCode and returns if it's a known and expected error. 
func sentinelError(err error) error { if sts, ok := status.FromError(err); ok { diff --git a/pkg/debuginfo/debuginfod.go b/pkg/debuginfo/debuginfod.go index ff70e21bd9b..b797df07b8a 100644 --- a/pkg/debuginfo/debuginfod.go +++ b/pkg/debuginfo/debuginfod.go @@ -28,33 +28,38 @@ import ( "golang.org/x/net/context" ) -type DebugInfodClient interface { - GetDebugInfo(ctx context.Context, buildid string) (io.ReadCloser, error) +type DebuginfodClient interface { + Get(ctx context.Context, buildid string) (io.ReadCloser, error) + Exists(ctx context.Context, buildid string) (bool, error) } -type NopDebugInfodClient struct{} +type NopDebuginfodClient struct{} -func (NopDebugInfodClient) GetDebugInfo(context.Context, string) (io.ReadCloser, error) { - return io.NopCloser(bytes.NewReader(nil)), ErrDebugInfoNotFound +func (NopDebuginfodClient) Get(context.Context, string) (io.ReadCloser, error) { + return io.NopCloser(bytes.NewReader(nil)), ErrDebuginfoNotFound } -type HTTPDebugInfodClient struct { +func (NopDebuginfodClient) Exists(context.Context, string) (bool, error) { + return false, nil +} + +type HTTPDebuginfodClient struct { logger log.Logger client *http.Client - UpstreamServers []*url.URL + upstreamServers []*url.URL timeoutDuration time.Duration } -type DebugInfodClientObjectStorageCache struct { +type DebuginfodClientObjectStorageCache struct { logger log.Logger - client DebugInfodClient + client DebuginfodClient bucket objstore.Bucket } -// NewHTTPDebugInfodClient returns a new HTTP debug info client. -func NewHTTPDebugInfodClient(logger log.Logger, serverURLs []string, timeoutDuration time.Duration) (*HTTPDebugInfodClient, error) { +// NewHTTPDebuginfodClient returns a new HTTP debug info client. 
+func NewHTTPDebuginfodClient(logger log.Logger, serverURLs []string, timeoutDuration time.Duration) (*HTTPDebuginfodClient, error) { logger = log.With(logger, "component", "debuginfod") parsedURLs := make([]*url.URL, 0, len(serverURLs)) for _, serverURL := range serverURLs { @@ -67,17 +72,17 @@ func NewHTTPDebugInfodClient(logger log.Logger, serverURLs []string, timeoutDura return nil, fmt.Errorf("unsupported scheme %q", u.Scheme) } } - return &HTTPDebugInfodClient{ + return &HTTPDebuginfodClient{ logger: logger, - UpstreamServers: parsedURLs, + upstreamServers: parsedURLs, timeoutDuration: timeoutDuration, client: http.DefaultClient, }, nil } -// NewDebugInfodClientWithObjectStorageCache creates a new DebugInfodClient that caches the debug information in the object storage. -func NewDebugInfodClientWithObjectStorageCache(logger log.Logger, bucket objstore.Bucket, h DebugInfodClient) (DebugInfodClient, error) { - return &DebugInfodClientObjectStorageCache{ +// NewDebuginfodClientWithObjectStorageCache creates a new DebuginfodClient that caches the debug information in the object storage. +func NewDebuginfodClientWithObjectStorageCache(logger log.Logger, bucket objstore.Bucket, h DebuginfodClient) (DebuginfodClient, error) { + return &DebuginfodClientObjectStorageCache{ logger: logger, client: h, bucket: bucket, @@ -93,10 +98,9 @@ type readCloser struct { closer } -// GetDebugInfo returns debug info for given buildid while caching it in object storage. -func (c *DebugInfodClientObjectStorageCache) GetDebugInfo(ctx context.Context, buildID string) (io.ReadCloser, error) { - logger := log.With(c.logger, "buildid", buildID) - debugInfo, err := c.client.GetDebugInfo(ctx, buildID) +// Get returns debuginfo for given buildid while caching it in object storage. 
+func (c *DebuginfodClientObjectStorageCache) Get(ctx context.Context, buildID string) (io.ReadCloser, error) { + debuginfo, err := c.client.Get(ctx, buildID) if err != nil { return nil, err } @@ -104,18 +108,18 @@ func (c *DebugInfodClientObjectStorageCache) GetDebugInfo(ctx context.Context, b r, w := io.Pipe() go func() { defer w.Close() - defer debugInfo.Close() + defer debuginfo.Close() - // TODO(kakkoyun): Use store.upload() to upload the debug info to object storage. + // TODO(kakkoyun): Use store.upload() to upload the debuginfo to object storage. if err := c.bucket.Upload(ctx, objectPath(buildID), r); err != nil { - level.Error(logger).Log("msg", "failed to upload downloaded debuginfod file", "err", err) + level.Error(c.logger).Log("msg", "failed to upload downloaded debuginfod file", "err", err, "build_id", buildID) } }() return readCloser{ - Reader: io.TeeReader(debugInfo, w), + Reader: io.TeeReader(debuginfo, w), closer: closer(func() error { - defer debugInfo.Close() + defer debuginfo.Close() if err := w.Close(); err != nil { return err @@ -125,10 +129,22 @@ func (c *DebugInfodClientObjectStorageCache) GetDebugInfo(ctx context.Context, b }, nil } -// GetDebugInfo returns debug information file for given buildID by downloading it from upstream servers. -func (c *HTTPDebugInfodClient) GetDebugInfo(ctx context.Context, buildID string) (io.ReadCloser, error) { - logger := log.With(c.logger, "buildid", buildID) +// Exists returns true if debuginfo for given buildid exists. +func (c *DebuginfodClientObjectStorageCache) Exists(ctx context.Context, buildID string) (bool, error) { + exists, err := c.bucket.Exists(ctx, objectPath(buildID)) + if err != nil { + return false, err + } + + if exists { + return true, nil + } + + return c.client.Exists(ctx, buildID) +} +// Get returns debug information file for given buildID by downloading it from upstream servers. 
+func (c *HTTPDebuginfodClient) Get(ctx context.Context, buildID string) (io.ReadCloser, error) { // e.g: // "https://debuginfod.elfutils.org/" // "https://debuginfod.systemtap.org/" @@ -139,7 +155,7 @@ func (c *HTTPDebugInfodClient) GetDebugInfo(ctx context.Context, buildID string) // "https://debuginfod.altlinux.org/" // "https://debuginfod.archlinux.org/" // "https://debuginfod.centos.org/" - for _, u := range c.UpstreamServers { + for _, u := range c.upstreamServers { serverURL := *u rc, err := func(serverURL url.URL) (io.ReadCloser, error) { ctx, cancel := context.WithTimeout(ctx, c.timeoutDuration) @@ -152,20 +168,28 @@ func (c *HTTPDebugInfodClient) GetDebugInfo(ctx context.Context, buildID string) return rc, nil }(serverURL) if err != nil { - level.Warn(logger).Log( - "msg", "failed to download debug info file from upstream debuginfod server, trying next one (if exists)", - "server", serverURL, "err", err, - ) continue } if rc != nil { return rc, nil } } - return nil, ErrDebugInfoNotFound + return nil, ErrDebuginfoNotFound +} + +func (c *HTTPDebuginfodClient) Exists(ctx context.Context, buildID string) (bool, error) { + r, err := c.Get(ctx, buildID) + if err != nil { + if err == ErrDebuginfoNotFound { + return false, nil + } + return false, err + } + + return true, r.Close() } -func (c *HTTPDebugInfodClient) request(ctx context.Context, u url.URL, buildID string) (io.ReadCloser, error) { +func (c *HTTPDebuginfodClient) request(ctx context.Context, u url.URL, buildID string) (io.ReadCloser, error) { // https://www.mankier.com/8/debuginfod#Webapi // Endpoint: /buildid/BUILDID/debuginfo // If the given buildid is known to the server, @@ -187,7 +211,7 @@ func (c *HTTPDebugInfodClient) request(ctx context.Context, u url.URL, buildID s return resp.Body, nil case 4: if resp.StatusCode == http.StatusNotFound { - return nil, ErrDebugInfoNotFound + return nil, ErrDebuginfoNotFound } return nil, fmt.Errorf("client error: %s", resp.Status) case 5: diff --git 
a/pkg/debuginfo/debuginfod_test.go b/pkg/debuginfo/debuginfod_test.go index 08b2d237a31..8ad139c4c06 100644 --- a/pkg/debuginfo/debuginfod_test.go +++ b/pkg/debuginfo/debuginfod_test.go @@ -72,7 +72,7 @@ func TestHTTPDebugInfodClient_request(t *testing.T) { } for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { - c := &HTTPDebugInfodClient{ + c := &HTTPDebuginfodClient{ logger: log.NewNopLogger(), timeoutDuration: tt.fields.timeoutDuration, client: &http.Client{ diff --git a/pkg/debuginfo/fetcher.go b/pkg/debuginfo/fetcher.go new file mode 100644 index 00000000000..2a7d5b57162 --- /dev/null +++ b/pkg/debuginfo/fetcher.go @@ -0,0 +1,68 @@ +// Copyright 2022 The Parca Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package debuginfo + +import ( + "context" + "errors" + "fmt" + "io" + + debuginfopb "github.com/parca-dev/parca/gen/proto/go/parca/debuginfo/v1alpha1" + "github.com/thanos-io/objstore" +) + +var ErrUnknownDebuginfoSource = errors.New("unknown debuginfo source") + +type Fetcher struct { + metadata MetadataManager + debuginfodClient DebuginfodClient + bucket objstore.Bucket +} + +func NewFetcher( + metadata MetadataManager, + debuginfodClient DebuginfodClient, + bucket objstore.Bucket, +) *Fetcher { + return &Fetcher{ + metadata: metadata, + debuginfodClient: debuginfodClient, + bucket: bucket, + } +} + +func (f *Fetcher) FetchDebuginfo(ctx context.Context, buildid string) (io.ReadCloser, error) { + dbginfo, err := f.metadata.Fetch(ctx, buildid) + if err != nil { + return nil, fmt.Errorf("fetching metadata: %w", err) + } + + switch dbginfo.Source { + case debuginfopb.Debuginfo_SOURCE_UPLOAD: + return f.fetchFromBucket(ctx, dbginfo) + case debuginfopb.Debuginfo_SOURCE_DEBUGINFOD: + return f.fetchFromDebuginfod(ctx, dbginfo) + default: + return nil, ErrUnknownDebuginfoSource + } +} + +func (f *Fetcher) fetchFromBucket(ctx context.Context, dbginfo *debuginfopb.Debuginfo) (io.ReadCloser, error) { + return f.bucket.Get(ctx, objectPath(dbginfo.BuildId)) +} + +func (f *Fetcher) fetchFromDebuginfod(ctx context.Context, dbginfo *debuginfopb.Debuginfo) (io.ReadCloser, error) { + return f.debuginfodClient.Get(ctx, dbginfo.BuildId) +} diff --git a/pkg/debuginfo/metadata.go b/pkg/debuginfo/metadata.go index f839ba90ff0..9e41c4ec87e 100644 --- a/pkg/debuginfo/metadata.go +++ b/pkg/debuginfo/metadata.go @@ -16,76 +16,27 @@ package debuginfo import ( "bytes" "context" - "encoding/json" "errors" "fmt" + "io" "path" - "time" "github.com/go-kit/log" - "github.com/go-kit/log/level" + "github.com/golang/protobuf/jsonpb" "github.com/thanos-io/objstore" -) + "google.golang.org/protobuf/types/known/timestamppb" -var ( - ErrMetadataShouldExist = errors.New("debug info metadata should 
exist") - ErrMetadataUnexpectedState = errors.New("debug info metadata state is unexpected") - // There's no debug info metadata. This could mean that an older version - // uploaded the debug info files, but there's no record of the metadata, yet. - ErrMetadataNotFound = errors.New("debug info metadata not found") + debuginfopb "github.com/parca-dev/parca/gen/proto/go/parca/debuginfo/v1alpha1" ) -type MetadataState int64 - -const ( - MetadataStateUnknown MetadataState = iota - // The debug info file is being uploaded. - MetadataStateUploading - // The debug info file is fully uploaded. - MetadataStateUploaded - // The debug info file is corrupted. - MetadataStateCorrupted +var ( + ErrMetadataShouldExist = errors.New("debuginfo metadata should exist") + ErrMetadataUnexpectedState = errors.New("debuginfo metadata state is unexpected") + ErrMetadataNotFound = errors.New("debuginfo metadata not found") + ErrUploadMetadataNotFound = errors.New("debuginfo upload metadata not found") + ErrUploadIDMismatch = errors.New("debuginfo upload id mismatch") ) -var mdStateStr = map[MetadataState]string{ - MetadataStateUnknown: "METADATA_STATE_UNKNOWN", - MetadataStateUploading: "METADATA_STATE_UPLOADING", - MetadataStateUploaded: "METADATA_STATE_UPLOADED", - MetadataStateCorrupted: "METADATA_STATE_CORRUPTED", -} - -var strMdState = map[string]MetadataState{ - "METADATA_STATE_UNKNOWN": MetadataStateUnknown, - "METADATA_STATE_UPLOADING": MetadataStateUploading, - "METADATA_STATE_UPLOADED": MetadataStateUploaded, - "METADATA_STATE_CORRUPTED": MetadataStateCorrupted, -} - -func (m MetadataState) String() string { - val, ok := mdStateStr[m] - if !ok { - return "" - } - return val -} - -func (m MetadataState) MarshalJSON() ([]byte, error) { - buffer := bytes.NewBufferString(`"`) - buffer.WriteString(mdStateStr[m]) - buffer.WriteString(`"`) - return buffer.Bytes(), nil -} - -func (m *MetadataState) UnmarshalJSON(b []byte) error { - var s string - err := json.Unmarshal(b, &s) - if err != 
nil { - return err - } - *m = strMdState[s] - return nil -} - type ObjectStoreMetadata struct { logger log.Logger @@ -96,121 +47,97 @@ func NewObjectStoreMetadata(logger log.Logger, bucket objstore.Bucket) *ObjectSt return &ObjectStoreMetadata{logger: log.With(logger, "component", "debuginfo-metadata"), bucket: bucket} } -type Metadata struct { - State MetadataState `json:"state"` - BuildID string `json:"build_id"` - Hash string `json:"hash"` - UploadStartedAt int64 `json:"upload_started_at"` - UploadFinishedAt int64 `json:"upload_finished_at"` -} - -func (m *ObjectStoreMetadata) MarkAsCorrupted(ctx context.Context, buildID string) error { - if err := m.write(ctx, buildID, &Metadata{ - State: MetadataStateCorrupted, - }); err != nil { - return fmt.Errorf("failed to write metadata: %w", err) - } - level.Debug(m.logger).Log("msg", "marked as corrupted", "buildid", buildID) - return nil -} - -func (m *ObjectStoreMetadata) MarkAsUploading(ctx context.Context, buildID string) error { - _, err := m.bucket.Get(ctx, metadataObjectPath(buildID)) - // The metadata file should not exist yet. Not erroring here because there's - // room for a race condition. 
- if err == nil { - level.Info(m.logger).Log("msg", "there should not be a metadata file") - return nil +func (m *ObjectStoreMetadata) MarkAsNotValidELF(ctx context.Context, buildID string) error { + dbginfo, err := m.Fetch(ctx, buildID) + if err != nil { + return err } - if !m.bucket.IsObjNotFoundErr(err) { - level.Error(m.logger).Log("msg", "unexpected error", "err", err) - return err + dbginfo.Quality = &debuginfopb.DebuginfoQuality{ + NotValidElf: true, } - if err := m.write(ctx, buildID, &Metadata{ - State: MetadataStateUploading, - BuildID: buildID, - UploadStartedAt: time.Now().Unix(), - }); err != nil { + if err := m.write(ctx, dbginfo); err != nil { return fmt.Errorf("failed to write metadata: %w", err) } - level.Debug(m.logger).Log("msg", "marked as uploading", "buildid", buildID) return nil } -func (m *ObjectStoreMetadata) MarkAsUploaded(ctx context.Context, buildID, hash string) error { - r, err := m.bucket.Get(ctx, metadataObjectPath(buildID)) - if err != nil { - level.Error(m.logger).Log("msg", "expected metadata file", "err", err) - return ErrMetadataShouldExist - } - buf := new(bytes.Buffer) - _, err = buf.ReadFrom(r) - if err != nil { - return err - } +func (m *ObjectStoreMetadata) MarkAsDebuginfodSource(ctx context.Context, buildID string) error { + return m.write(ctx, &debuginfopb.Debuginfo{ + BuildId: buildID, + Source: debuginfopb.Debuginfo_SOURCE_DEBUGINFOD, + }) +} + +func (m *ObjectStoreMetadata) MarkAsUploading(ctx context.Context, buildID, uploadID string, startedAt *timestamppb.Timestamp) error { + return m.write(ctx, &debuginfopb.Debuginfo{ + BuildId: buildID, + Source: debuginfopb.Debuginfo_SOURCE_UPLOAD, + Upload: &debuginfopb.DebuginfoUpload{ + Id: uploadID, + State: debuginfopb.DebuginfoUpload_STATE_UPLOADING, + StartedAt: startedAt, + }, + }) +} - metaData := &Metadata{} - if err := json.Unmarshal(buf.Bytes(), metaData); err != nil { +func (m *ObjectStoreMetadata) MarkAsUploaded(ctx context.Context, buildID, uploadID string, 
finishedAt *timestamppb.Timestamp) error { + dbginfo, err := m.Fetch(ctx, buildID) + if err != nil { return err } - // There's a small window where a race could happen. - if metaData.State == MetadataStateUploaded { - return nil + if dbginfo.Upload == nil { + return ErrUploadMetadataNotFound } - if metaData.State == MetadataStateUploading && metaData.BuildID != buildID { - return errors.New("build ids do not match") + if dbginfo.Upload.Id != uploadID { + return ErrUploadIDMismatch } - metaData.State = MetadataStateUploaded - metaData.BuildID = buildID - metaData.Hash = hash - metaData.UploadFinishedAt = time.Now().Unix() - - metadataBytes, _ := json.MarshalIndent(&metaData, "", "\t") - newData := bytes.NewReader(metadataBytes) - - if err := m.bucket.Upload(ctx, metadataObjectPath(buildID), newData); err != nil { - return err - } + dbginfo.Upload.State = debuginfopb.DebuginfoUpload_STATE_UPLOADED + dbginfo.Upload.FinishedAt = finishedAt - level.Debug(m.logger).Log("msg", "marked as uploaded", "buildid", buildID) - return nil + return m.write(ctx, dbginfo) } -func (m *ObjectStoreMetadata) Fetch(ctx context.Context, buildID string) (*Metadata, error) { +func (m *ObjectStoreMetadata) Fetch(ctx context.Context, buildID string) (*debuginfopb.Debuginfo, error) { r, err := m.bucket.Get(ctx, metadataObjectPath(buildID)) if err != nil { if m.bucket.IsObjNotFoundErr(err) { return nil, ErrMetadataNotFound } - return nil, err + return nil, fmt.Errorf("fetch debuginfo metadata from object storage: %w", err) } - buf := new(bytes.Buffer) - _, err = buf.ReadFrom(r) + content, err := io.ReadAll(r) if err != nil { - return nil, err + return nil, fmt.Errorf("read debuginfo metadata from object storage: %w", err) } - metaData := &Metadata{} - if err := json.Unmarshal(buf.Bytes(), metaData); err != nil { - return nil, err + dbginfo := &debuginfopb.Debuginfo{} + if err := (&jsonpb.Unmarshaler{}).Unmarshal(bytes.NewBuffer(content), dbginfo); err != nil { + return nil, 
fmt.Errorf("unmarshal debuginfo metadata: %w", err) } - return metaData, nil + return dbginfo, nil } -func (m *ObjectStoreMetadata) write(ctx context.Context, buildID string, md *Metadata) error { - metadataBytes, _ := json.MarshalIndent(md, "", "\t") - r := bytes.NewReader(metadataBytes) - if err := m.bucket.Upload(ctx, metadataObjectPath(buildID), r); err != nil { - level.Error(m.logger).Log("msg", "failed to create metadata file", "err", err) +func (m *ObjectStoreMetadata) write(ctx context.Context, dbginfo *debuginfopb.Debuginfo) error { + if dbginfo.BuildId == "" { + return errors.New("build id is required to write debuginfo metadata") + } + + debuginfoJSON, err := (&jsonpb.Marshaler{Indent: " "}).MarshalToString(dbginfo) + if err != nil { return err } + + r := bytes.NewReader([]byte(debuginfoJSON)) + if err := m.bucket.Upload(ctx, metadataObjectPath(dbginfo.BuildId), r); err != nil { + return fmt.Errorf("write debuginfo metadata to object storage: %w", err) + } return nil } diff --git a/pkg/debuginfo/metadata_test.go b/pkg/debuginfo/metadata_test.go index f901038f66d..0045917f397 100644 --- a/pkg/debuginfo/metadata_test.go +++ b/pkg/debuginfo/metadata_test.go @@ -14,22 +14,28 @@ package debuginfo import ( + "bytes" "context" - "encoding/json" "os" "reflect" "testing" + "time" "github.com/go-kit/log" + "github.com/golang/protobuf/jsonpb" + debuginfopb "github.com/parca-dev/parca/gen/proto/go/parca/debuginfo/v1alpha1" "github.com/prometheus/client_golang/prometheus" "github.com/stretchr/testify/require" "github.com/thanos-io/objstore/client" "github.com/thanos-io/objstore/providers/filesystem" "go.opentelemetry.io/otel/trace" + "google.golang.org/protobuf/proto" + "google.golang.org/protobuf/types/known/timestamppb" "gopkg.in/yaml.v2" ) func TestMetadata(t *testing.T) { + ctx := context.Background() tracer := trace.NewNoopTracerProvider().Tracer("") dir, err := os.MkdirTemp("", "parca-test") @@ -58,49 +64,45 @@ func TestMetadata(t *testing.T) { cacheDir, 
NewObjectStoreMetadata(logger, bucket), bucket, - NopDebugInfodClient{}, + NopDebuginfodClient{}, + SignedUpload{ + Enabled: false, + }, + time.Minute*15, + 1024*1024*1024, ) require.NoError(t, err) // Test that the initial state should be empty. - _, err = store.metadata.Fetch(context.Background(), "fake-build-id") + _, err = store.metadata.Fetch(ctx, "fake-build-id") require.ErrorIs(t, err, ErrMetadataNotFound) // Updating the state should be written to blob storage. - err = store.metadata.MarkAsUploading(context.Background(), "fake-build-id") + time := time.Now() + err = store.metadata.MarkAsUploading(ctx, "fake-build-id", "fake-upload-id", timestamppb.New(time)) require.NoError(t, err) - md, err := store.metadata.Fetch(context.Background(), "fake-build-id") + dbginfo, err := store.metadata.Fetch(ctx, "fake-build-id") require.NoError(t, err) - require.Equal(t, MetadataStateUploading, md.State) + require.Equal(t, "fake-build-id", dbginfo.BuildId) + require.Equal(t, "fake-upload-id", dbginfo.Upload.Id) + require.Equal(t, debuginfopb.DebuginfoUpload_STATE_UPLOADING, dbginfo.Upload.State) } -func TestMetadata_MarshalJSON(t *testing.T) { +func TestMetadataMarshalJSON(t *testing.T) { tests := []struct { - m Metadata + dbginfo *debuginfopb.Debuginfo want string wantErr bool }{ { - m: Metadata{State: MetadataStateUnknown, BuildID: "build_id", Hash: "hash"}, - want: `{"state":"METADATA_STATE_UNKNOWN","build_id":"build_id","hash":"hash","upload_started_at":0,"upload_finished_at":0}`, - }, - { - m: Metadata{State: MetadataStateUploading, BuildID: "build_id", Hash: "hash"}, - want: `{"state":"METADATA_STATE_UPLOADING","build_id":"build_id","hash":"hash","upload_started_at":0,"upload_finished_at":0}`, - }, - { - m: Metadata{State: MetadataStateUploaded, BuildID: "build_id", Hash: "hash"}, - want: `{"state":"METADATA_STATE_UPLOADED","build_id":"build_id","hash":"hash","upload_started_at":0,"upload_finished_at":0}`, - }, - { - m: Metadata{State: MetadataStateCorrupted, 
BuildID: "build_id", Hash: "hash"}, - want: `{"state":"METADATA_STATE_CORRUPTED","build_id":"build_id","hash":"hash","upload_started_at":0,"upload_finished_at":0}`, + dbginfo: &debuginfopb.Debuginfo{BuildId: "my-build-id", Upload: &debuginfopb.DebuginfoUpload{Id: "my-upload-id", State: debuginfopb.DebuginfoUpload_STATE_UPLOADING}}, + want: `{"buildId":"my-build-id","upload":{"id":"my-upload-id","state":"STATE_UPLOADING"}}`, }, } for _, tt := range tests { - t.Run(tt.m.State.String(), func(t *testing.T) { - got, err := json.Marshal(tt.m) + t.Run(tt.dbginfo.Upload.State.String(), func(t *testing.T) { + got, err := (&jsonpb.Marshaler{}).MarshalToString(tt.dbginfo) if (err != nil) != tt.wantErr { t.Errorf("MarshalJSON() error = %v, wantErr %v", err, tt.wantErr) return @@ -113,36 +115,27 @@ func TestMetadata_MarshalJSON(t *testing.T) { } } -func TestMetadata_UnmarshalJSON(t *testing.T) { +func TestMetadataUnmarshalJSON(t *testing.T) { tests := []struct { - name string - b []byte - want Metadata + input string + want *debuginfopb.Debuginfo wantErr bool }{ { - b: []byte(`{"state":"METADATA_STATE_UNKNOWN","build_id":"build_id","hash":"hash","upload_started_at":0,"upload_finished_at":0}`), - want: Metadata{State: MetadataStateUnknown, BuildID: "build_id", Hash: "hash"}, - }, - { - b: []byte(`{"state":"METADATA_STATE_UPLOADING","build_id":"build_id","hash":"hash","upload_started_at":0,"upload_finished_at":0}`), - want: Metadata{State: MetadataStateUploading, BuildID: "build_id", Hash: "hash"}, - }, - { - b: []byte(`{"state":"METADATA_STATE_UPLOADED","build_id":"build_id","hash":"hash","upload_started_at":0,"upload_finished_at":0}`), - want: Metadata{State: MetadataStateUploaded, BuildID: "build_id", Hash: "hash"}, - }, - { - b: []byte(`{"state":"METADATA_STATE_CORRUPTED","build_id":"build_id","hash":"hash","upload_started_at":0,"upload_finished_at":0}`), - want: Metadata{State: MetadataStateCorrupted, BuildID: "build_id", Hash: "hash"}, + input: 
`{"buildId":"my-build-id","upload":{"id":"my-upload-id","state":"STATE_UPLOADING"}}`, + want: &debuginfopb.Debuginfo{BuildId: "my-build-id", Upload: &debuginfopb.DebuginfoUpload{Id: "my-upload-id", State: debuginfopb.DebuginfoUpload_STATE_UPLOADING}}, }, } for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - res := Metadata{} - - if err := json.Unmarshal(tt.b, &res); (err != nil) != tt.wantErr { - t.Errorf("UnmarshalJSON() error = %v, wantErr %v", err, tt.wantErr) + t.Run(tt.want.Upload.State.String(), func(t *testing.T) { + got := &debuginfopb.Debuginfo{} + err := (&jsonpb.Unmarshaler{}).Unmarshal(bytes.NewBuffer([]byte(tt.input)), got) + if (err != nil) != tt.wantErr { + t.Errorf("UnmarshalJSON() error = %v, wantErr %v", err, tt.wantErr) + return + } + if !proto.Equal(got, tt.want) { + t.Errorf("UnmarshalJSON() got = %v, want %v", got, tt.want) } }) } diff --git a/pkg/debuginfo/reader.go b/pkg/debuginfo/reader.go index dfb6b1571fc..9e6f28c5a02 100644 --- a/pkg/debuginfo/reader.go +++ b/pkg/debuginfo/reader.go @@ -26,7 +26,7 @@ import ( ) type UploadReader struct { - stream debuginfopb.DebugInfoService_UploadServer + stream debuginfopb.DebuginfoService_UploadServer cur io.Reader size uint64 } diff --git a/pkg/debuginfo/store.go b/pkg/debuginfo/store.go index e933767b475..4b3244ed7e6 100644 --- a/pkg/debuginfo/store.go +++ b/pkg/debuginfo/store.go @@ -14,33 +14,30 @@ package debuginfo import ( - "bytes" "context" - "debug/elf" "encoding/hex" "errors" "fmt" "io" - "os" "path" "time" "github.com/go-kit/log" - "github.com/go-kit/log/level" + "github.com/google/uuid" lru "github.com/hashicorp/golang-lru/v2" - "github.com/nanmu42/limitio" "github.com/thanos-io/objstore" "github.com/thanos-io/objstore/client" "go.opentelemetry.io/otel/attribute" "go.opentelemetry.io/otel/trace" "google.golang.org/grpc/codes" "google.golang.org/grpc/status" + "google.golang.org/protobuf/types/known/timestamppb" debuginfopb 
"github.com/parca-dev/parca/gen/proto/go/parca/debuginfo/v1alpha1" - "github.com/parca-dev/parca/pkg/symbol/elfutils" + "github.com/parca-dev/parca/pkg/signedupload" ) -var ErrDebugInfoNotFound = errors.New("debug info not found") +var ErrDebuginfoNotFound = errors.New("debuginfo not found") type CacheProvider string @@ -63,35 +60,48 @@ type CacheConfig struct { } type MetadataManager interface { - MarkAsCorrupted(ctx context.Context, buildID string) error - MarkAsUploading(ctx context.Context, buildID string) error - MarkAsUploaded(ctx context.Context, buildID, hash string) error - Fetch(ctx context.Context, buildID string) (*Metadata, error) + MarkAsDebuginfodSource(ctx context.Context, buildID string) error + MarkAsUploading(ctx context.Context, buildID, uploadID string, startedAt *timestamppb.Timestamp) error + MarkAsUploaded(ctx context.Context, buildID, uploadID string, finishedAt *timestamppb.Timestamp) error + Fetch(ctx context.Context, buildID string) (*debuginfopb.Debuginfo, error) } type Store struct { - debuginfopb.UnimplementedDebugInfoServiceServer + debuginfopb.UnimplementedDebuginfoServiceServer - tracer trace.Tracer - logger log.Logger - cacheDir string + tracer trace.Tracer + logger log.Logger bucket objstore.Bucket metadata MetadataManager - debuginfodClient DebugInfodClient + debuginfodClient DebuginfodClient existsCache *lru.ARCCache[string, struct{}] + + signedUpload SignedUpload + + maxUploadDuration time.Duration + maxUploadSize int64 + + timeNow func() time.Time +} + +type SignedUpload struct { + Enabled bool + Client signedupload.Client } // NewStore returns a new debug info store. 
func NewStore( tracer trace.Tracer, logger log.Logger, - cacheDir string, metadata MetadataManager, bucket objstore.Bucket, - debuginfodClient DebugInfodClient, + debuginfodClient DebuginfodClient, + signedUpload SignedUpload, + maxUploadDuration time.Duration, + maxUploadSize int64, ) (*Store, error) { existsCache, err := lru.NewARC[string, struct{}](100_000) if err != nil { @@ -99,18 +109,24 @@ func NewStore( } return &Store{ - tracer: tracer, - logger: log.With(logger, "component", "debuginfo"), - bucket: bucket, - cacheDir: cacheDir, - metadata: metadata, - debuginfodClient: debuginfodClient, + tracer: tracer, + logger: log.With(logger, "component", "debuginfo"), + bucket: bucket, + metadata: metadata, + debuginfodClient: debuginfodClient, + signedUpload: signedUpload, + maxUploadDuration: maxUploadDuration, + maxUploadSize: maxUploadSize, + timeNow: time.Now, existsCache: existsCache, }, nil } -func (s *Store) Exists(ctx context.Context, req *debuginfopb.ExistsRequest) (*debuginfopb.ExistsResponse, error) { +// ShouldInitiateUpload returns whether an upload should be initiated for the +// given build ID. Checking if an upload should even be initiated allows the +// parca-agent to avoid extracting debuginfos unnecessarily from a binary. 
+func (s *Store) ShouldInitiateUpload(ctx context.Context, req *debuginfopb.ShouldInitiateUploadRequest) (*debuginfopb.ShouldInitiateUploadResponse, error) { span := trace.SpanFromContext(ctx) span.SetAttributes(attribute.String("build_id", req.GetBuildId())) @@ -119,266 +135,197 @@ func (s *Store) Exists(ctx context.Context, req *debuginfopb.ExistsRequest) (*de return nil, status.Error(codes.InvalidArgument, err.Error()) } - found, err := s.find(ctx, buildID) - if err != nil { - return nil, err + attemptDebuginfod := true + dbginfo, err := s.metadata.Fetch(ctx, buildID) + if err != nil && !errors.Is(err, ErrMetadataNotFound) { + return nil, status.Error(codes.Internal, err.Error()) } + if err == nil { + if dbginfo.Source == debuginfopb.Debuginfo_SOURCE_UPLOAD { + if dbginfo.Upload != nil { + if dbginfo.Upload.State == debuginfopb.DebuginfoUpload_STATE_UPLOADING { + // State is uploading, so we should only allow a new upload + // if the upload that was started is stale. Signed URLs + // will have expired by this point so it's safe to do. + return &debuginfopb.ShouldInitiateUploadResponse{ + ShouldInitiateUpload: s.uploadIsStale(dbginfo.Upload), + }, nil + } + } + if dbginfo.Quality == nil || !dbginfo.Quality.NotValidElf { + // We already have debuginfo that's also not marked to be + // invalid, so we don't need to upload it again. We should only + // try something different if we don't have any debuginfo to + // work with. + return &debuginfopb.ShouldInitiateUploadResponse{ + ShouldInitiateUpload: false, + }, nil + } - if found { - metadataFile, err := s.metadata.Fetch(ctx, buildID) - if err != nil { - if errors.Is(err, ErrMetadataNotFound) { - return &debuginfopb.ExistsResponse{Exists: false}, nil + // We have debuginfo but it's not a valid ELF file, so we accept + // whatever we can get, maybe it's better. 
+ } + + if dbginfo.Source == debuginfopb.Debuginfo_SOURCE_DEBUGINFOD { + if dbginfo.Quality == nil || !dbginfo.Quality.NotValidElf { + // We already have debuginfo that's also not marked to be + // invalid, so we don't need to upload it again. + return &debuginfopb.ShouldInitiateUploadResponse{ + ShouldInitiateUpload: false, + }, nil } - return nil, status.Error(codes.Internal, err.Error()) + + // We have debuginfo but it's not a valid ELF file, so we accept + // whatever we can get, maybe it's better. But we've already tried + // the debuginfod debuginfos and they were no good. + attemptDebuginfod = false } + } - // metadata.Hash should nt be empty, but let's have the check just in case. - if metadataFile.Hash != "" && metadataFile.Hash == req.Hash { - return &debuginfopb.ExistsResponse{Exists: true}, nil + if attemptDebuginfod { + existsInDebuginfod, err := s.debuginfodClient.Exists(ctx, buildID) + if err != nil { + return nil, status.Error(codes.Internal, err.Error()) } - var exists bool - // If it is not an exact version of the source object file what we have so, let the client try to upload it. - if metadataFile.State == MetadataStateUploading { - exists = !isStale(metadataFile) + if existsInDebuginfod { + // The debuginfo exists in debuginfod, so we don't need to upload it. + return &debuginfopb.ShouldInitiateUploadResponse{ShouldInitiateUpload: false}, s.metadata.MarkAsDebuginfodSource(ctx, buildID) } - return &debuginfopb.ExistsResponse{Exists: exists}, nil } - return &debuginfopb.ExistsResponse{Exists: false}, nil + return &debuginfopb.ShouldInitiateUploadResponse{ShouldInitiateUpload: true}, nil } -func (s *Store) Upload(stream debuginfopb.DebugInfoService_UploadServer) error { - req, err := stream.Recv() +func (s *Store) InitiateUpload(ctx context.Context, req *debuginfopb.InitiateUploadRequest) (*debuginfopb.InitiateUploadResponse, error) { + // We don't want to blindly accept upload initiation requests that + // shouldn't have happened. 
+ shouldInitiateResp, err := s.ShouldInitiateUpload(ctx, &debuginfopb.ShouldInitiateUploadRequest{ + BuildId: req.BuildId, + }) if err != nil { - msg := "failed to receive upload info" - level.Error(s.logger).Log("msg", msg, "err", err) - return status.Errorf(codes.Unknown, msg) + return nil, err } - - var ( - buildID = req.GetInfo().BuildId - hash = req.GetInfo().Hash - r = &UploadReader{stream: stream} - ) - - ctx := stream.Context() - span := trace.SpanFromContext(ctx) - span.SetAttributes(attribute.String("build_id", buildID)) - span.SetAttributes(attribute.String("hash", hash)) - - if err := s.upload(ctx, buildID, hash, r); err != nil { - return err + if !shouldInitiateResp.ShouldInitiateUpload { + return nil, status.Error(codes.FailedPrecondition, "upload should not have been attempted to be initiated") } - level.Debug(s.logger).Log("msg", "debug info uploaded", "buildid", buildID) - return stream.SendAndClose(&debuginfopb.UploadResponse{ - BuildId: buildID, - Size: r.size, - }) -} - -func (s *Store) upload(ctx context.Context, buildID, hash string, r io.Reader) error { - if err := validateInput(buildID); err != nil { - return status.Error(codes.InvalidArgument, fmt.Errorf("invalid build ID: %w", err).Error()) - } + span := trace.SpanFromContext(ctx) + span.SetAttributes(attribute.String("build_id", req.BuildId)) + uploadID := uuid.New().String() - if err := validateInput(hash); err != nil { - return status.Error(codes.InvalidArgument, fmt.Errorf("invalid hash: %w", err).Error()) + if req.Size > s.maxUploadSize { + return nil, status.Errorf(codes.InvalidArgument, "upload size %d exceeds maximum allowed size %d", req.Size, s.maxUploadSize) } - level.Debug(s.logger).Log("msg", "trying to upload debug info", "buildid", buildID) + uploadStarted := s.timeNow() + uploadExpiry := uploadStarted.Add(s.maxUploadDuration) - metadataFile, err := s.metadata.Fetch(ctx, buildID) - if err == nil { - level.Debug(s.logger).Log("msg", "fetching metadata state", "result", 
metadataFile) - - switch metadataFile.State { - case MetadataStateCorrupted: - // Corrupted. Re-upload. - case MetadataStateUploaded: - // The debug info was fully uploaded. - return status.Error(codes.AlreadyExists, "debuginfo already exists") - case MetadataStateUploading: - if !isStale(metadataFile) { - return status.Error(codes.AlreadyExists, "debuginfo already exists, being uploaded right now") - } - // The debug info upload operation most likely failed. - default: - return status.Error(codes.Internal, "unknown metadata state") - } - } else { - if !errors.Is(err, ErrMetadataNotFound) { - level.Error(s.logger).Log("msg", "failed to fetch metadata state", "err", err) - } + if !s.signedUpload.Enabled { + return &debuginfopb.InitiateUploadResponse{ + UploadId: uploadID, + UploadStrategy: debuginfopb.InitiateUploadResponse_UPLOAD_STRATEGY_GRPC, + }, s.metadata.MarkAsUploading(ctx, req.BuildId, uploadID, timestamppb.New(uploadStarted)) } - found, err := s.find(ctx, buildID) + signedURL, err := s.signedUpload.Client.SignedPUT(ctx, objectPath(req.BuildId), req.Size, uploadExpiry) if err != nil { - return err + return nil, status.Error(codes.Internal, err.Error()) } - if found && (metadataFile == nil || (metadataFile != nil && metadataFile.State != MetadataStateCorrupted)) { - if hash != "" && metadataFile != nil && metadataFile.Hash == hash { - level.Debug(s.logger).Log("msg", "debug info already exists", "buildid", buildID) - return status.Error(codes.AlreadyExists, "debuginfo already exists") - } - - objFile, _, err := s.FetchDebugInfo(ctx, buildID) - if err != nil && !errors.Is(err, ErrDebugInfoNotFound) { - return status.Error(codes.Internal, err.Error()) - } - - if err == nil { - if err := elfutils.ValidateFile(objFile); err != nil { - // Failed to validate. Mark the file as corrupted, and let the client try to upload it again. 
- if err := s.metadata.MarkAsCorrupted(ctx, buildID); err != nil { - level.Warn(s.logger).Log("msg", "failed to update metadata as corrupted", "err", err) - } - level.Error(s.logger).Log("msg", "failed to validate object file", "buildid", buildID) - // Client will retry. - return status.Error(codes.Internal, fmt.Errorf("validate elf file: %w", err).Error()) - } + return &debuginfopb.InitiateUploadResponse{ + UploadId: uploadID, + UploadStrategy: debuginfopb.InitiateUploadResponse_UPLOAD_STRATEGY_SIGNED_URL, + SignedUrl: signedURL, + }, s.metadata.MarkAsUploading(ctx, req.BuildId, uploadID, timestamppb.New(uploadStarted)) +} - // Valid. - f, err := elf.Open(objFile) - if err != nil { - level.Debug(s.logger).Log("msg", "failed to open object file", "err", err) - } else { - hasDWARF, err := elfutils.HasDWARF(f) - if err != nil { - level.Debug(s.logger).Log("msg", "failed to check for DWARF", "err", err) - } - f.Close() +func (s *Store) MarkUploadFinished(ctx context.Context, req *debuginfopb.MarkUploadFinishedRequest) (*debuginfopb.MarkUploadFinishedResponse, error) { + span := trace.SpanFromContext(ctx) + span.SetAttributes(attribute.String("build_id", req.BuildId)) + span.SetAttributes(attribute.String("upload_id", req.UploadId)) - if hasDWARF { - return status.Error(codes.AlreadyExists, "debuginfo already exists") - } - } - } + buildID := req.BuildId + if err := validateInput(buildID); err != nil { + return nil, status.Error(codes.InvalidArgument, err.Error()) } - // At this point we know that we received a better version of the debug information file, - // so let the client upload it. 
- - if err := s.metadata.MarkAsUploading(ctx, buildID); err != nil { - return status.Error(codes.Internal, fmt.Errorf("failed to update metadata before uploading: %w", err).Error()) + err := s.metadata.MarkAsUploaded(ctx, buildID, req.UploadId, timestamppb.New(s.timeNow())) + if errors.Is(err, ErrDebuginfoNotFound) { + return nil, status.Error(codes.NotFound, "no debuginfo metadata found for build id") } - - // limitio.Writer is used to avoid buffer overflow. - // We only need to read the first 64 bytes (at most). - // The ELF header is 52 or 64 bytes long for 32-bit and 64-bit binaries respectively. - // If we receive a longer data, we will ignore the rest without an error. - b := bytes.NewBuffer(nil) - w := limitio.NewWriter(b, 64, true) - - // Here we're optimistically uploading the received stream directly to the bucket, - // and if something goes wrong we mark it as corrupted, so it could be overwritten in subsequent calls. - // We only want to make sure we don't read a corrupted file while symbolizing. - // Ww also wanted to prevent any form of buffering for this data on the server-side, - // thus the optimistic writes directly to the object-store while also writing the header of the file into a buffer, - // so we can validate the ELF header. - if err := s.bucket.Upload(ctx, objectPath(buildID), io.TeeReader(r, w)); err != nil { - msg := "failed to upload" - level.Error(s.logger).Log("msg", msg, "err", err) - return status.Errorf(codes.Unknown, msg) + if errors.Is(err, ErrUploadMetadataNotFound) { + return nil, status.Error(codes.NotFound, "no debuginfo upload metadata found for build id") } - - if err := elfutils.ValidateHeader(b); err != nil { - // Failed to validate. Mark the incoming stream as corrupted, and let the client try to upload it again. 
- if err := s.metadata.MarkAsCorrupted(ctx, buildID); err != nil { - err = fmt.Errorf("failed to update metadata after uploaded, as corrupted: %w", err) - return status.Error(codes.Internal, err.Error()) - } - return status.Error(codes.InvalidArgument, fmt.Errorf("validate elf header: %w", err).Error()) + if errors.Is(err, ErrUploadIDMismatch) { + return nil, status.Error(codes.InvalidArgument, "upload id mismatch") } - - if err := s.metadata.MarkAsUploaded(ctx, buildID, hash); err != nil { - return status.Error(codes.Internal, fmt.Errorf("failed to update metadata after uploaded: %w", err).Error()) + if err != nil { + return nil, status.Error(codes.Internal, err.Error()) } - return nil + return &debuginfopb.MarkUploadFinishedResponse{}, nil } -func isStale(metadataFile *Metadata) bool { - return time.Now().Add(-15 * time.Minute).After(time.Unix(metadataFile.UploadStartedAt, 0)) -} - -func (s *Store) Download(req *debuginfopb.DownloadRequest, stream debuginfopb.DebugInfoService_DownloadServer) error { - if err := validateInput(req.BuildId); err != nil { - return status.Error(codes.InvalidArgument, err.Error()) +func (s *Store) Upload(stream debuginfopb.DebuginfoService_UploadServer) error { + req, err := stream.Recv() + if err != nil { + return status.Errorf(codes.Unknown, "failed to receive upload info: %q", err) } + var ( + buildID = req.GetInfo().BuildId + uploadID = req.GetInfo().UploadId + r = &UploadReader{stream: stream} + ) + ctx := stream.Context() - found, err := s.find(ctx, req.BuildId) - if err != nil { - return err - } + span := trace.SpanFromContext(ctx) + span.SetAttributes(attribute.String("build_id", buildID)) + span.SetAttributes(attribute.String("upload_id", uploadID)) - if !found { - return status.Error(codes.NotFound, "debuginfo not found") + if err := s.upload(ctx, buildID, uploadID, r); err != nil { + return err } - metadata, err := s.metadata.Fetch(ctx, req.BuildId) - if err != nil { - return status.Error(codes.Internal, err.Error()) - } + 
return stream.SendAndClose(&debuginfopb.UploadResponse{ + BuildId: buildID, + Size: r.size, + }) +} - switch metadata.State { - case MetadataStateCorrupted: - return status.Error(codes.FailedPrecondition, "debuginfo is corrupted") - case MetadataStateUploading: - return status.Error(codes.Unavailable, "debuginfo is being uploaded") +func (s *Store) upload(ctx context.Context, buildID, uploadID string, r io.Reader) error { + if err := validateInput(buildID); err != nil { + return status.Errorf(codes.InvalidArgument, "invalid build ID: %q", err) } - objFile, source, err := s.FetchDebugInfo(ctx, req.BuildId) + dbginfo, err := s.metadata.Fetch(ctx, buildID) if err != nil { - if errors.Is(err, ErrDebugInfoNotFound) { - return status.Error(codes.NotFound, err.Error()) + if errors.Is(err, ErrMetadataNotFound) { + return status.Error(codes.FailedPrecondition, "metadata not found, this indicates that the upload was not previously initiated") } return status.Error(codes.Internal, err.Error()) } - if err := stream.Send(&debuginfopb.DownloadResponse{ - Data: &debuginfopb.DownloadResponse_Info{ - Info: &debuginfopb.DownloadInfo{ - Source: source, - }, - }, - }); err != nil { - return err + if dbginfo.Upload == nil { + return status.Error(codes.FailedPrecondition, "metadata not found, this indicates that the upload was not previously initiated") } - f, err := os.Open(objFile) - if err != nil { - return status.Error(codes.Internal, err.Error()) + if dbginfo.Upload.Id != uploadID { + return status.Error(codes.InvalidArgument, "the upload ID does not match the one returned by the InitiateUpload call") } - buf := make([]byte, ChunkSize) - bytesSent := 0 - for { - n, err := f.Read(buf) - if err != nil { - if err == io.EOF { - break - } - return fmt.Errorf("read next chunk (%d bytes sent so far): %w", bytesSent, err) - } - bytesSent += n - - if err := stream.Send(&debuginfopb.DownloadResponse{ - Data: &debuginfopb.DownloadResponse_ChunkData{ - ChunkData: buf[:n], - }, - }); err != nil 
{ - return err - } + if err := s.bucket.Upload(ctx, objectPath(buildID), r); err != nil { + return status.Error(codes.Internal, fmt.Errorf("upload debuginfo: %w", err).Error()) } return nil } +func (s *Store) uploadIsStale(upload *debuginfopb.DebuginfoUpload) bool { + return upload.StartedAt.AsTime().Add(s.maxUploadDuration + 2*time.Minute).Before(s.timeNow()) +} + func validateInput(id string) error { _, err := hex.DecodeString(id) if err != nil { @@ -412,163 +359,6 @@ func (s *Store) find(ctx context.Context, key string) (bool, error) { return found, nil } -func (s *Store) FetchDebugInfo(ctx context.Context, buildID string) (string, debuginfopb.DownloadInfo_Source, error) { - logger := log.With(s.logger, "buildid", buildID) - - source := debuginfopb.DownloadInfo_SOURCE_UNKNOWN_UNSPECIFIED - objFile, err := s.fetchFromObjectStore(ctx, buildID) - if err != nil { - // It's ok if we don't have the symbols for given BuildID, it happens too often. - if errors.Is(err, ErrDebugInfoNotFound) { - level.Debug(logger).Log("msg", "failed to fetch object", "err", err) - } else { - level.Warn(logger).Log("msg", "failed to fetch object", "err", err) - } - - // Let's try to find a debug file from debuginfod servers. - objFile, err = s.fetchDebuginfodFile(ctx, buildID) - if err != nil { - return "", source, fmt.Errorf("failed to fetch: %w", err) - } - source = debuginfopb.DownloadInfo_SOURCE_DEBUGINFOD - } else { - source = debuginfopb.DownloadInfo_SOURCE_UPLOAD - } - - // Let's make sure we have the best version of the debug file. - if err := elfutils.ValidateFile(objFile); err != nil { - level.Warn(logger).Log("msg", "failed to validate debug information", "err", err) - // Mark the file as corrupted, and let the client try to upload it again. 
- err := s.metadata.MarkAsCorrupted(ctx, buildID) - if err != nil { - level.Warn(logger).Log( - "msg", "failed to mark debug information", - "err", fmt.Errorf("failed to update metadata for corrupted: %w", err), - ) - } - if source != debuginfopb.DownloadInfo_SOURCE_DEBUGINFOD { - dbgFile, err := s.fetchDebuginfodFile(ctx, buildID) - if err != nil { - level.Warn(logger).Log("msg", "failed to fetch debuginfod file", "err", err) - } else { - objFile = dbgFile - source = debuginfopb.DownloadInfo_SOURCE_DEBUGINFOD - } - } - } - - if source != debuginfopb.DownloadInfo_SOURCE_DEBUGINFOD { - f, err := elf.Open(objFile) - if err != nil { - level.Debug(logger).Log("msg", "failed to open object file", "err", err) - } else { - hasDWARF, err := elfutils.HasDWARF(f) - if err != nil { - level.Debug(logger).Log("msg", "failed to check for DWARF", "err", err) - } - f.Close() - - if !hasDWARF { - // Try to download a better version from debuginfod servers. - dbgFile, err := s.fetchDebuginfodFile(ctx, buildID) - if err != nil { - if errors.Is(err, ErrDebugInfoNotFound) { - level.Debug(logger).Log("msg", "failed to fetch debuginfod file", "err", err) - } else { - level.Warn(logger).Log("msg", "failed to fetch debuginfod file", "err", err) - } - } else { - objFile = dbgFile - source = debuginfopb.DownloadInfo_SOURCE_DEBUGINFOD - } - } - } - } - - return objFile, source, nil -} - -func (s *Store) fetchFromObjectStore(ctx context.Context, buildID string) (string, error) { - logger := log.With(s.logger, "buildid", buildID) - - objFile := s.localCachePath(buildID) - // Check if it's already cached locally; if not download. - if _, err := os.Stat(objFile); os.IsNotExist(err) { - // Download the debuginfo file from the bucket. 
- r, err := s.bucket.Get(ctx, objectPath(buildID)) - if err != nil { - if s.bucket.IsObjNotFoundErr(err) { - level.Debug(logger).Log("msg", "failed to fetch object from object storage", "err", err) - return "", ErrDebugInfoNotFound - } - return "", fmt.Errorf("failed to fetch object: %w", err) - } - - // Cache the file locally. - if err := s.cache(objFile, r); err != nil { - return "", fmt.Errorf("failed to fetch debug info file: %w", err) - } - } - - return objFile, nil -} - -func (s *Store) fetchDebuginfodFile(ctx context.Context, buildID string) (string, error) { - logger := log.With(s.logger, "buildid", buildID) - level.Debug(logger).Log("msg", "attempting to download from debuginfod servers") - - objFile := s.localCachePath(buildID) - // Try downloading the debuginfo file from the debuginfod server. - r, err := s.debuginfodClient.GetDebugInfo(ctx, buildID) - if err != nil { - level.Debug(logger).Log("msg", "failed to download debuginfo from debuginfod", "err", err) - return "", fmt.Errorf("failed to fetch from debuginfod: %w", err) - } - defer r.Close() - level.Info(logger).Log("msg", "debug info downloaded from debuginfod server") - - // Cache the file locally. 
- if err := s.cache(objFile, r); err != nil { - level.Debug(logger).Log("msg", "failed to cache debuginfo", "err", err) - return "", fmt.Errorf("failed to fetch from debuginfod: %w", err) - } - - return objFile, nil -} - -func (s *Store) localCachePath(buildID string) string { - return path.Join(s.cacheDir, buildID, "debuginfo") -} - -func (s *Store) cache(localPath string, r io.ReadCloser) error { - tmpfile, err := os.CreateTemp(s.cacheDir, "symbol-download-*") - if err != nil { - return fmt.Errorf("create temp file: %w", err) - } - defer os.Remove(tmpfile.Name()) - - written, err := io.Copy(tmpfile, r) - if err != nil { - return fmt.Errorf("copy debug info file to local temp file: %w", err) - } - if err := tmpfile.Close(); err != nil { - return fmt.Errorf("close tempfile to write debug info file: %w", err) - } - if written == 0 { - return fmt.Errorf("received empty debug info: %w", ErrDebugInfoNotFound) - } - - err = os.MkdirAll(path.Dir(localPath), 0o700) - if err != nil { - return fmt.Errorf("create debug info file directory: %w", err) - } - // Need to use rename to make the "creation" atomic. 
- if err := os.Rename(tmpfile.Name(), localPath); err != nil { - return fmt.Errorf("atomically move downloaded debug info file: %w", err) - } - return nil -} - func objectPath(buildID string) string { return path.Join(buildID, "debuginfo") } diff --git a/pkg/debuginfo/store_test.go b/pkg/debuginfo/store_test.go index c4431c9fa9a..6e2ec5b6b47 100644 --- a/pkg/debuginfo/store_test.go +++ b/pkg/debuginfo/store_test.go @@ -16,27 +16,44 @@ package debuginfo import ( "bytes" "context" - "encoding/hex" "io" stdlog "log" "net" "os" "testing" + "time" "github.com/go-kit/log" - "github.com/prometheus/client_golang/prometheus" "github.com/stretchr/testify/require" - "github.com/thanos-io/objstore/client" - "github.com/thanos-io/objstore/providers/filesystem" + "github.com/thanos-io/objstore" "go.opentelemetry.io/otel/trace" "google.golang.org/grpc" "google.golang.org/grpc/credentials/insecure" - "gopkg.in/yaml.v2" debuginfopb "github.com/parca-dev/parca/gen/proto/go/parca/debuginfo/v1alpha1" ) +type fakeDebuginfodClient struct { + items map[string]io.ReadCloser + err error +} + +func (c *fakeDebuginfodClient) Get(ctx context.Context, buildid string) (io.ReadCloser, error) { + item, ok := c.items[buildid] + if !ok { + return nil, ErrDebuginfoNotFound + } + + return item, nil +} + +func (c *fakeDebuginfodClient) Exists(ctx context.Context, buildid string) (bool, error) { + _, ok := c.items[buildid] + return ok, nil +} + func TestStore(t *testing.T) { + ctx := context.Background() tracer := trace.NewNoopTracerProvider().Tracer("") dir, err := os.MkdirTemp("", "parca-test") @@ -48,24 +65,26 @@ func TestStore(t *testing.T) { defer os.RemoveAll(cacheDir) logger := log.NewNopLogger() - cfg, err := yaml.Marshal(&client.BucketConfig{ - Type: client.FILESYSTEM, - Config: filesystem.Config{ - Directory: dir, - }, - }) - require.NoError(t, err) - - bucket, err := client.NewBucket(logger, cfg, prometheus.NewRegistry(), "parca/store") + bucket := objstore.NewInMemBucket() 
require.NoError(t, err) + metadata := NewObjectStoreMetadata(logger, bucket) s, err := NewStore( tracer, logger, cacheDir, - NewObjectStoreMetadata(logger, bucket), + metadata, bucket, - NopDebugInfodClient{}, + &fakeDebuginfodClient{ + items: map[string]io.ReadCloser{ + "deadbeef": io.NopCloser(bytes.NewBufferString("debuginfo1")), + }, + }, + SignedUpload{ + Enabled: false, + }, + time.Minute*15, + 1024*1024*1024, ) require.NoError(t, err) @@ -75,7 +94,7 @@ func TestStore(t *testing.T) { } grpcServer := grpc.NewServer() defer grpcServer.GracefulStop() - debuginfopb.RegisterDebugInfoServiceServer(grpcServer, s) + debuginfopb.RegisterDebuginfoServiceServer(grpcServer, s) go func() { err := grpcServer.Serve(lis) if err != nil { @@ -86,7 +105,7 @@ func TestStore(t *testing.T) { conn, err := grpc.Dial(lis.Addr().String(), grpc.WithTransportCredentials(insecure.NewCredentials())) require.NoError(t, err) defer conn.Close() - c := NewDebugInfoClient(conn) + c := NewDebuginfoClient(debuginfopb.NewDebuginfoServiceClient(conn)) b := bytes.NewBuffer(nil) for i := 0; i < 1024; i++ { @@ -98,60 +117,76 @@ func TestStore(t *testing.T) { for i := 0; i < 1024; i++ { b.Write([]byte("c")) } - _, err = c.Upload(context.Background(), "abcd", "abcd", b) - require.Error(t, err) - nf, err := os.Open("testdata/validelf_nosections") + // Totally wrong order of upload protocol sequence. + _, err = c.Upload(ctx, &debuginfopb.UploadInfo{BuildId: "abcd"}, bytes.NewReader(b.Bytes())) + require.EqualError(t, err, "close and receive: rpc error: code = FailedPrecondition desc = metadata not found, this indicates that the upload was not previously initiated") + + // Simulate we initiated this upload 30 minutes ago. 
+ s.timeNow = func() time.Time { return time.Now().Add(-30 * time.Minute) } + + shouldInitiateResp, err := c.ShouldInitiateUpload(ctx, &debuginfopb.ShouldInitiateUploadRequest{BuildId: "abcd"}) require.NoError(t, err) + require.True(t, shouldInitiateResp.ShouldInitiateUpload) - _, err = c.Upload(context.Background(), hex.EncodeToString([]byte("nosection")), "abcd", nf) - require.Error(t, err) + initiateResp, err := c.InitiateUpload(ctx, &debuginfopb.InitiateUploadRequest{BuildId: "abcd"}) + require.NoError(t, err) - wf, err := os.Open("testdata/validelf_withsections") + // An upload is already in progress. So we should not initiate another one. + shouldInitiateResp, err = c.ShouldInitiateUpload(ctx, &debuginfopb.ShouldInitiateUploadRequest{BuildId: "abcd"}) require.NoError(t, err) + require.False(t, shouldInitiateResp.ShouldInitiateUpload) + + // Set time to current time, where the upload should be expired. So we can initiate a new one. + s.timeNow = time.Now - size, err := c.Upload(context.Background(), hex.EncodeToString([]byte("section")), "abcd", wf) + // Correct upload flow. 
+ shouldInitiateResp, err = c.ShouldInitiateUpload(ctx, &debuginfopb.ShouldInitiateUploadRequest{BuildId: "abcd"}) require.NoError(t, err) - require.Equal(t, 7079, int(size)) + require.True(t, shouldInitiateResp.ShouldInitiateUpload) - obj, err := s.bucket.Get(context.Background(), hex.EncodeToString([]byte("section"))+"/debuginfo") + initiateResp, err = c.InitiateUpload(ctx, &debuginfopb.InitiateUploadRequest{BuildId: "abcd"}) require.NoError(t, err) - content, err := io.ReadAll(obj) + size, err := c.Upload(ctx, &debuginfopb.UploadInfo{BuildId: "abcd", UploadId: initiateResp.UploadId}, bytes.NewReader(b.Bytes())) require.NoError(t, err) - require.Equal(t, 7079, len(content)) - require.Equal(t, []byte{0x7f, 'E', 'L', 'F'}, content[:4]) + require.Equal(t, 3072, int(size)) - ctx := context.Background() - exists, err := c.Exists(context.Background(), hex.EncodeToString([]byte("section")), "abcd") + _, err = c.MarkUploadFinished(ctx, &debuginfopb.MarkUploadFinishedRequest{BuildId: "abcd", UploadId: initiateResp.UploadId}) require.NoError(t, err) - require.True(t, exists) - buf := bytes.NewBuffer(nil) - downloader, err := c.Downloader(ctx, hex.EncodeToString([]byte("section"))) + obj, err := s.bucket.Get(ctx, "abcd/debuginfo") + require.NoError(t, err) + + content, err := io.ReadAll(obj) require.NoError(t, err) - require.Equal(t, debuginfopb.DownloadInfo_SOURCE_UPLOAD, downloader.Info().Source) + require.Equal(t, 3072, len(content)) + require.Equal(t, b.Bytes(), content) - written, err := downloader.Download(ctx, buf) + // Uploads should not be asked to be initiated again since so far there is + // nothing wrong with the upload. It uploaded successfully and is not + // marked invalid. 
+ shouldInitiateResp, err = c.ShouldInitiateUpload(ctx, &debuginfopb.ShouldInitiateUploadRequest{BuildId: "abcd"}) require.NoError(t, err) - require.Equal(t, 7079, written) - require.Equal(t, 7079, buf.Len()) - require.NoError(t, downloader.Close()) + require.False(t, shouldInitiateResp.ShouldInitiateUpload) - // Test only reading the download info. - downloader, err = c.Downloader(ctx, hex.EncodeToString([]byte("section"))) + // If asynchronously we figured out the debuginfo was not a valid ELF file, + // we should allow uploading something else. + // Don't test the whole upload flow again, just the ShouldInitiateUpload part. + require.NoError(t, metadata.MarkAsNotValidELF(ctx, "abcd")) + shouldInitiateResp, err = c.ShouldInitiateUpload(ctx, &debuginfopb.ShouldInitiateUploadRequest{BuildId: "abcd"}) require.NoError(t, err) - require.Equal(t, debuginfopb.DownloadInfo_SOURCE_UPLOAD, downloader.Info().Source) - require.NoError(t, downloader.Close()) + require.True(t, shouldInitiateResp.ShouldInitiateUpload) - bucket, err = client.NewBucket(logger, cfg, prometheus.NewRegistry(), "parca/store") + // The debuginfod client should be able to fetch the debuginfo, therefore don't allow uploading. + shouldInitiateResp, err = c.ShouldInitiateUpload(ctx, &debuginfopb.ShouldInitiateUploadRequest{BuildId: "deadbeef"}) require.NoError(t, err) + require.False(t, shouldInitiateResp.ShouldInitiateUpload) - // Replace bucket with a new empty one. - s.bucket = bucket + // If we mark the debuginfo as invalid, we should allow uploading. + require.NoError(t, metadata.MarkAsNotValidELF(ctx, "deadbeef")) - // Test that the response is cached. 
- exists, err = c.Exists(context.Background(), hex.EncodeToString([]byte("section")), "abcd") + shouldInitiateResp, err = c.ShouldInitiateUpload(ctx, &debuginfopb.ShouldInitiateUploadRequest{BuildId: "deadbeef"}) require.NoError(t, err) - require.True(t, exists) + require.True(t, shouldInitiateResp.ShouldInitiateUpload) } diff --git a/pkg/parca/parca.go b/pkg/parca/parca.go index 26dfb91b00b..008336f29ca 100644 --- a/pkg/parca/parca.go +++ b/pkg/parca/parca.go @@ -65,6 +65,7 @@ import ( queryservice "github.com/parca-dev/parca/pkg/query" "github.com/parca-dev/parca/pkg/scrape" "github.com/parca-dev/parca/pkg/server" + "github.com/parca-dev/parca/pkg/signedupload" "github.com/parca-dev/parca/pkg/symbol" "github.com/parca-dev/parca/pkg/symbolizer" ) @@ -108,6 +109,10 @@ type Flags struct { DebugInfodUpstreamServers []string `default:"https://debuginfod.elfutils.org" help:"Upstream debuginfod servers. Defaults to https://debuginfod.elfutils.org. It is an ordered list of servers to try. Learn more at https://sourceware.org/elfutils/Debuginfod.html"` DebugInfodHTTPRequestTimeout time.Duration `default:"5m" help:"Timeout duration for HTTP request to upstream debuginfod server. 
Defaults to 5m"` DebuginfoCacheDir string `default:"/tmp" help:"Path to directory where debuginfo is cached."` + DebuginfoUploadMaxSize int64 `default:"1000000000" help:"Maximum size of debuginfo upload in bytes."` + DebuginfoUploadMaxDuration time.Duration `default:"15m" help:"Maximum duration of debuginfo upload."` + + DebuginfoUploadsSignedURL bool `default:"false" help:"Whether to use signed URLs for debuginfo uploads."` StoreAddress string `kong:"help='gRPC address to send profiles and symbols to.'"` BearerToken string `kong:"help='Bearer token to authenticate with store.'"` @@ -170,6 +175,22 @@ func Run(ctx context.Context, logger log.Logger, reg *prometheus.Registry, flags return err } + var signedUploadClient signedupload.Client + if flags.DebuginfoUploadsSignedURL { + var err error + signedUploadClient, err = signedupload.NewClient( + context.Background(), + cfg.ObjectStorage.Bucket, + ) + + if err != nil { + level.Error(logger).Log("msg", "failed to initialize signed upload client", "err", err) + return err + } + + defer signedUploadClient.Close() + } + var mStr metastorepb.MetastoreServiceServer switch flags.Metastore { case metaStoreBadger: @@ -304,15 +325,15 @@ func Run(ctx context.Context, logger log.Logger, reg *prometheus.Registry, flags return err } - var debugInfodClient debuginfo.DebugInfodClient = debuginfo.NopDebugInfodClient{} + var debuginfodClient debuginfo.DebuginfodClient = debuginfo.NopDebuginfodClient{} if len(flags.DebugInfodUpstreamServers) > 0 { - httpDebugInfoClient, err := debuginfo.NewHTTPDebugInfodClient(logger, flags.DebugInfodUpstreamServers, flags.DebugInfodHTTPRequestTimeout) + httpDebugInfoClient, err := debuginfo.NewHTTPDebuginfodClient(logger, flags.DebugInfodUpstreamServers, flags.DebugInfodHTTPRequestTimeout) if err != nil { level.Error(logger).Log("msg", "failed to initialize debuginfod http client", "err", err) return err } - debugInfodClient, err = debuginfo.NewDebugInfodClientWithObjectStorageCache( + 
debuginfodClient, err = debuginfo.NewDebuginfodClientWithObjectStorageCache( logger, objstore.NewPrefixedBucket(bucket, "debuginfod-cache"), httpDebugInfoClient, @@ -323,14 +344,20 @@ func Run(ctx context.Context, logger log.Logger, reg *prometheus.Registry, flags } } - dbgInfoMetadata := debuginfo.NewObjectStoreMetadata(logger, bucket) - dbgInfo, err := debuginfo.NewStore( + debuginfoMetadata := debuginfo.NewObjectStoreMetadata(logger, bucket) + debuginfoBucket := objstore.NewPrefixedBucket(bucket, "debuginfo") + dbginfo, err := debuginfo.NewStore( tracerProvider.Tracer("debuginfo"), logger, - flags.DebuginfoCacheDir, - dbgInfoMetadata, - objstore.NewPrefixedBucket(bucket, "debuginfo"), - debugInfodClient, + debuginfoMetadata, + debuginfoBucket, + debuginfodClient, + debuginfo.SignedUpload{ + Enabled: flags.DebuginfoUploadsSignedURL, + Client: signedUploadClient, + }, + flags.DebuginfoUploadMaxDuration, + flags.DebuginfoUploadMaxSize, ) if err != nil { level.Error(logger).Log("msg", "failed to initialize debug info store", "err", err) @@ -364,11 +391,11 @@ func Run(ctx context.Context, logger log.Logger, reg *prometheus.Registry, flags s := symbolizer.New( logger, reg, + debuginfoMetadata, metastore, - dbgInfo, + debuginfo.NewFetcher(debuginfoMetadata, debuginfodClient, debuginfoBucket), sym, flags.DebuginfoCacheDir, - flags.DebuginfoCacheDir, 0, ) ctx, cancel := context.WithCancel(ctx) @@ -419,13 +446,13 @@ func Run(ctx context.Context, logger log.Logger, reg *prometheus.Registry, flags flags.CORSAllowedOrigins, flags.PathPrefix, server.RegisterableFunc(func(ctx context.Context, srv *grpc.Server, mux *runtime.ServeMux, endpoint string, opts []grpc.DialOption) error { - debuginfopb.RegisterDebugInfoServiceServer(srv, dbgInfo) + debuginfopb.RegisterDebuginfoServiceServer(srv, dbginfo) profilestorepb.RegisterProfileStoreServiceServer(srv, s) profilestorepb.RegisterAgentsServiceServer(srv, s) querypb.RegisterQueryServiceServer(srv, q) 
scrapepb.RegisterScrapeServiceServer(srv, m) - if err := debuginfopb.RegisterDebugInfoServiceHandlerFromEndpoint(ctx, mux, endpoint, opts); err != nil { + if err := debuginfopb.RegisterDebuginfoServiceHandlerFromEndpoint(ctx, mux, endpoint, opts); err != nil { return err } diff --git a/pkg/signedupload/client.go b/pkg/signedupload/client.go new file mode 100644 index 00000000000..717172e4414 --- /dev/null +++ b/pkg/signedupload/client.go @@ -0,0 +1,107 @@ +// Copyright 2022 The Parca Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package signedupload + +import ( + "context" + "fmt" + "io" + "strings" + "time" + + "github.com/thanos-io/objstore/client" + "gopkg.in/yaml.v2" +) + +// DirDelim is the delimiter used to model a directory structure in an object store bucket. 
+const DirDelim = "/" + +type ErrUnsupportedProvider struct { + Provider client.ObjProvider +} + +func (e ErrUnsupportedProvider) Error() string { + return "provider not supported (only GCS is currently supported): " + string(e.Provider) +} + +type Client interface { + io.Closer + SignedPUT( + ctx context.Context, + objectKey string, + size int64, + expiry time.Time, + ) (string, error) +} + +func NewClient(ctx context.Context, bucketConf *client.BucketConfig) (Client, error) { + if bucketConf.Type != client.GCS { + return nil, ErrUnsupportedProvider{Provider: bucketConf.Type} + } + + config, err := yaml.Marshal(bucketConf.Config) + if err != nil { + return nil, fmt.Errorf("failed to marshal bucket config: %w", err) + } + + c, err := NewGCSClient(ctx, config) + if err != nil { + return nil, fmt.Errorf("failed to create GCS client: %w", err) + } + + return NewPrefixedClient(c, bucketConf.Prefix), nil +} + +func NewPrefixedClient(client Client, prefix string) Client { + if validPrefix(prefix) { + return &PrefixedClient{client: client, prefix: strings.Trim(prefix, DirDelim)} + } + + return client +} + +type PrefixedClient struct { + client Client + prefix string +} + +func (c *PrefixedClient) SignedPUT( + ctx context.Context, + objectKey string, + size int64, + expiry time.Time, +) (string, error) { + return c.client.SignedPUT(ctx, conditionalPrefix(c.prefix, objectKey), size, expiry) +} + +func (c *PrefixedClient) Close() error { + return c.client.Close() +} + +func validPrefix(prefix string) bool { + prefix = strings.Replace(prefix, "/", "", -1) + return len(prefix) > 0 +} + +func conditionalPrefix(prefix, name string) string { + if len(name) > 0 { + return withPrefix(prefix, name) + } + + return name +} + +func withPrefix(prefix, name string) string { + return prefix + DirDelim + name +} diff --git a/pkg/signedupload/gcs.go b/pkg/signedupload/gcs.go new file mode 100644 index 00000000000..37b4ffa78b7 --- /dev/null +++ b/pkg/signedupload/gcs.go @@ -0,0 +1,90 @@ +// 
Copyright 2022 The Parca Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package signedupload + +import ( + "context" + "io" + "strconv" + "time" + + "cloud.google.com/go/storage" + "github.com/pkg/errors" + "github.com/thanos-io/objstore/providers/gcs" + "golang.org/x/oauth2/google" + "google.golang.org/api/option" + "gopkg.in/yaml.v2" +) + +type GCSClient struct { + bucket *storage.BucketHandle + closer io.Closer +} + +func NewGCSClient(ctx context.Context, conf []byte) (*GCSClient, error) { + var gc gcs.Config + if err := yaml.Unmarshal(conf, &gc); err != nil { + return nil, err + } + + return NewGCSBucketWithConfig(ctx, gc) +} + +func NewGCSBucketWithConfig(ctx context.Context, gc gcs.Config) (*GCSClient, error) { + if gc.Bucket == "" { + return nil, errors.New("missing Google Cloud Storage bucket name for stored blocks") + } + + var opts []option.ClientOption + + // If ServiceAccount is provided, use them in GCS client, otherwise fallback to Google default logic. + if gc.ServiceAccount != "" { + credentials, err := google.CredentialsFromJSON(ctx, []byte(gc.ServiceAccount), storage.ScopeFullControl) + if err != nil { + return nil, errors.Wrap(err, "failed to create credentials from JSON") + } + opts = append(opts, option.WithCredentials(credentials)) + } + + opts = append(opts, option.WithUserAgent("parca")) + + gcsClient, err := storage.NewClient(ctx, opts...) 
+ if err != nil { + return nil, err + } + + return &GCSClient{ + bucket: gcsClient.Bucket(gc.Bucket), + closer: gcsClient, + }, nil +} + +func (c *GCSClient) Close() error { + return c.closer.Close() +} + +func (c *GCSClient) SignedPUT( + ctx context.Context, + objectKey string, + size int64, + expiry time.Time, +) (string, error) { + return c.bucket.SignedURL(objectKey, &storage.SignedURLOptions{ + Method: "PUT", + Expires: expiry, + Headers: []string{ + "X-Upload-Content-Length:" + strconv.FormatInt(size, 10), + }, + }) +} diff --git a/pkg/symbolizer/symbolizer.go b/pkg/symbolizer/symbolizer.go index abb6fb982d7..662e4030a29 100644 --- a/pkg/symbolizer/symbolizer.go +++ b/pkg/symbolizer/symbolizer.go @@ -15,8 +15,11 @@ package symbolizer import ( "context" + "debug/elf" "errors" "fmt" + "io" + "os" "path/filepath" "strings" "time" @@ -26,13 +29,16 @@ import ( "github.com/prometheus/client_golang/prometheus" "github.com/prometheus/client_golang/prometheus/promauto" - debuginfopb "github.com/parca-dev/parca/gen/proto/go/parca/debuginfo/v1alpha1" pb "github.com/parca-dev/parca/gen/proto/go/parca/metastore/v1alpha1" "github.com/parca-dev/parca/pkg/profile" "github.com/parca-dev/parca/pkg/runutil" "github.com/parca-dev/parca/pkg/symbol" ) +type DebuginfoMetadata interface { + MarkAsNotValidELF(ctx context.Context, buildID string) error +} + type Symbolizer struct { logger log.Logger // attempts counts the total number of symbolication attempts. @@ -51,31 +57,28 @@ type Symbolizer struct { metastore pb.MetastoreServiceClient symbolizer *symbol.Symbolizer - debuginfo DebugInfoFetcher - - // We want two different cache dirs for debuginfo and debuginfod as one of - // them is intended to be for files that are publicly available the other - // one potentially only privately. 
- debuginfodCacheDir string - debuginfoCacheDir string + debuginfo DebuginfoFetcher + metadata DebuginfoMetadata batchSize uint32 + + tmpDir string } -type DebugInfoFetcher interface { +type DebuginfoFetcher interface { // Fetch ensures that the debug info for the given build ID is available on // a local filesystem and returns a path to it. - FetchDebugInfo(ctx context.Context, buildID string) (string, debuginfopb.DownloadInfo_Source, error) + FetchDebuginfo(ctx context.Context, buildID string) (io.ReadCloser, error) } func New( logger log.Logger, reg prometheus.Registerer, + metadata DebuginfoMetadata, metastore pb.MetastoreServiceClient, - debuginfo DebugInfoFetcher, + debuginfo DebuginfoFetcher, symbolizer *symbol.Symbolizer, - debuginfodCacheDir string, - debuginfoCacheDir string, + tmpDir string, batchSize uint32, ) *Symbolizer { attemptsTotal := promauto.With(reg).NewCounter( @@ -107,17 +110,17 @@ func New( ) s := Symbolizer{ - logger: log.With(logger, "component", "symbolizer"), - attempts: attemptsTotal, - errors: errorsTotal, - duration: duration, - storeDuration: storeDuration, - metastore: metastore, - symbolizer: symbolizer, - debuginfo: debuginfo, - debuginfodCacheDir: debuginfodCacheDir, - debuginfoCacheDir: debuginfoCacheDir, - batchSize: batchSize, + logger: log.With(logger, "component", "symbolizer"), + attempts: attemptsTotal, + errors: errorsTotal, + duration: duration, + storeDuration: storeDuration, + metastore: metastore, + symbolizer: symbolizer, + debuginfo: debuginfo, + tmpDir: tmpDir, + batchSize: batchSize, + metadata: metadata, } return &s } @@ -320,14 +323,44 @@ func (s *Symbolizer) symbolizeLocationsForMapping(ctx context.Context, m *pb.Map logger := log.With(s.logger, "buildid", m.BuildId) // Fetch the debug info for the build ID. 
- objFile, _, err := s.debuginfo.FetchDebugInfo(ctx, m.BuildId) + rc, err := s.debuginfo.FetchDebuginfo(ctx, m.BuildId) if err != nil { return nil, fmt.Errorf("fetch debuginfo (BuildID: %q): %w", m.BuildId, err) } + f, err := os.CreateTemp(s.tmpDir, "parca-symbolizer-*") + if err != nil { + return nil, fmt.Errorf("create temp file: %w", err) + } + + _, err = io.Copy(f, rc) + if err != nil { + return nil, fmt.Errorf("copy debuginfo to temp file: %w", err) + } + + if err := rc.Close(); err != nil { + return nil, fmt.Errorf("close debuginfo reader: %w", err) + } + + if err := f.Close(); err != nil { + return nil, fmt.Errorf("close temp file: %w", err) + } + + e, err := elf.Open(f.Name()) + if err != nil { + if err := s.metadata.MarkAsNotValidELF(ctx, m.BuildId); err != nil { + level.Error(logger).Log("msg", "failed to mark build ID as not ELF", "err", err) + } + return nil, fmt.Errorf("open temp file as ELF: %w", err) + } + + if err := e.Close(); err != nil { + return nil, fmt.Errorf("close debuginfo file: %w", err) + } + // At this point we have the best version of the debug information file that we could find. // Let's symbolize it. 
- lines, err := s.symbolizer.Symbolize(ctx, m, locations, objFile) + lines, err := s.symbolizer.Symbolize(ctx, m, locations, f.Name()) if err != nil { if errors.Is(err, symbol.ErrLinerCreationFailedBefore) { level.Debug(logger).Log("msg", "failed to symbolize before", "err", err) diff --git a/pkg/symbolizer/symbolizer_test.go b/pkg/symbolizer/symbolizer_test.go index 718258a42cc..51ee99f476a 100644 --- a/pkg/symbolizer/symbolizer_test.go +++ b/pkg/symbolizer/symbolizer_test.go @@ -20,6 +20,7 @@ import ( "net" "os" "testing" + "time" "github.com/go-kit/log" "github.com/polarsignals/frostdb" @@ -427,12 +428,6 @@ func setup(t *testing.T) (*grpc.ClientConn, pb.MetastoreServiceClient, *Symboliz ) require.NoError(t, err) - debugInfoCacheDir, err := os.MkdirTemp("", "parca-debuginfo-test-cache-*") - require.NoError(t, err) - t.Cleanup(func() { - os.RemoveAll(debugInfoCacheDir) - }) - symbolizerCacheDir, err := os.MkdirTemp("", "parca-symbolizer-test-cache-*") require.NoError(t, err) t.Cleanup(func() { @@ -454,13 +449,16 @@ func setup(t *testing.T) (*grpc.ClientConn, pb.MetastoreServiceClient, *Symboliz require.NoError(t, err) metadata := debuginfo.NewObjectStoreMetadata(logger, bucket) + debuginfodClient := debuginfo.NopDebuginfodClient{} dbgStr, err := debuginfo.NewStore( tracer, logger, - debugInfoCacheDir, metadata, bucket, - debuginfo.NopDebugInfodClient{}, + debuginfodClient, + debuginfo.SignedUpload{Enabled: false}, + 15*time.Minute, + 1024*1024*1024, ) require.NoError(t, err) @@ -490,7 +488,7 @@ func setup(t *testing.T) (*grpc.ClientConn, pb.MetastoreServiceClient, *Symboliz grpcServer.GracefulStop() }) - debuginfopb.RegisterDebugInfoServiceServer(grpcServer, dbgStr) + debuginfopb.RegisterDebuginfoServiceServer(grpcServer, dbgStr) profilestorepb.RegisterProfileStoreServiceServer(grpcServer, pStr) go func() { @@ -510,10 +508,9 @@ func setup(t *testing.T) (*grpc.ClientConn, pb.MetastoreServiceClient, *Symboliz logger, prometheus.NewRegistry(), metastore, - dbgStr, + 
debuginfo.NewFetcher(metadata, debuginfodClient, bucket), sym, symbolizerCacheDir, - symbolizerCacheDir, 0, ) } diff --git a/pkg/symbolizer/testdata/2d5745782d486738416336492d624458373872722f43476c446c45724d31434c692d6745383869752d2f5f6765757162714a666856504e464c73585830762f445a554f4e796c324b655554684d463241784b43/metadata b/pkg/symbolizer/testdata/2d5745782d486738416336492d624458373872722f43476c446c45724d31434c692d6745383869752d2f5f6765757162714a666856504e464c73585830762f445a554f4e796c324b655554684d463241784b43/metadata new file mode 100644 index 00000000000..16d7553944d --- /dev/null +++ b/pkg/symbolizer/testdata/2d5745782d486738416336492d624458373872722f43476c446c45724d31434c692d6745383869752d2f5f6765757162714a666856504e464c73585830762f445a554f4e796c324b655554684d463241784b43/metadata @@ -0,0 +1 @@ +{"buildId":"2d5745782d486738416336492d624458373872722f43476c446c45724d31434c692d6745383869752d2f5f6765757162714a666856504e464c73585830762f445a554f4e796c324b655554684d463241784b43","upload":{"id":"my-upload-id","state":"STATE_UPLOADED"},"source":"SOURCE_UPLOAD"} diff --git a/pkg/symbolizer/testdata/2d6912fd3dd64542f6f6294f4bf9cb6c265b3085/metadata b/pkg/symbolizer/testdata/2d6912fd3dd64542f6f6294f4bf9cb6c265b3085/metadata new file mode 100644 index 00000000000..9b8a6cedc90 --- /dev/null +++ b/pkg/symbolizer/testdata/2d6912fd3dd64542f6f6294f4bf9cb6c265b3085/metadata @@ -0,0 +1 @@ +{"buildId":"2d6912fd3dd64542f6f6294f4bf9cb6c265b3085","upload":{"id":"my-upload-id","state":"STATE_UPLOADED"},"source":"SOURCE_UPLOAD"} diff --git a/pkg/symbolizer/testdata/536f474d6962346e6d6839516b665657786e436e2f43476c446c45724d31434c692d6745383869752d2f5f6765757162714a666856504e464c73585830762f545065446b774c707530396845444b4b34534767/metadata b/pkg/symbolizer/testdata/536f474d6962346e6d6839516b665657786e436e2f43476c446c45724d31434c692d6745383869752d2f5f6765757162714a666856504e464c73585830762f545065446b774c707530396845444b4b34534767/metadata new file mode 100644 index 
00000000000..b94a9ca4009 --- /dev/null +++ b/pkg/symbolizer/testdata/536f474d6962346e6d6839516b665657786e436e2f43476c446c45724d31434c692d6745383869752d2f5f6765757162714a666856504e464c73585830762f545065446b774c707530396845444b4b34534767/metadata @@ -0,0 +1 @@ +{"buildId":"536f474d6962346e6d6839516b665657786e436e2f43476c446c45724d31434c692d6745383869752d2f5f6765757162714a666856504e464c73585830762f545065446b774c707530396845444b4b34534767","upload":{"id":"my-upload-id","state":"STATE_UPLOADED"},"source":"SOURCE_UPLOAD"} diff --git a/pkg/symbolizer/testdata/595150334c6a706f4957766e4d6c7476614457742f454556526d5a2d665f79675433316e7169685f4a2f5a515a3830714d666c5a756f65714a79615154502f7057517431716e516f4b436b50696e756a474d6f/metadata b/pkg/symbolizer/testdata/595150334c6a706f4957766e4d6c7476614457742f454556526d5a2d665f79675433316e7169685f4a2f5a515a3830714d666c5a756f65714a79615154502f7057517431716e516f4b436b50696e756a474d6f/metadata new file mode 100644 index 00000000000..a9655d0ea41 --- /dev/null +++ b/pkg/symbolizer/testdata/595150334c6a706f4957766e4d6c7476614457742f454556526d5a2d665f79675433316e7169685f4a2f5a515a3830714d666c5a756f65714a79615154502f7057517431716e516f4b436b50696e756a474d6f/metadata @@ -0,0 +1 @@ +{"buildId":"595150334c6a706f4957766e4d6c7476614457742f454556526d5a2d665f79675433316e7169685f4a2f5a515a3830714d666c5a756f65714a79615154502f7057517431716e516f4b436b50696e756a474d6f","upload":{"id":"my-upload-id","state":"STATE_UPLOADED"},"source":"SOURCE_UPLOAD"} diff --git a/pkg/symbolizer/testdata/77364271716762793947664b3479416b4676642d2f417a333159574d503255743036584e4a3867414c2f4d77616c62673256647a494f4437674265727a472f37625364676957463368655f68784f7678745478/metadata b/pkg/symbolizer/testdata/77364271716762793947664b3479416b4676642d2f417a333159574d503255743036584e4a3867414c2f4d77616c62673256647a494f4437674265727a472f37625364676957463368655f68784f7678745478/metadata new file mode 100644 index 00000000000..bb11380084b --- /dev/null +++ 
b/pkg/symbolizer/testdata/77364271716762793947664b3479416b4676642d2f417a333159574d503255743036584e4a3867414c2f4d77616c62673256647a494f4437674265727a472f37625364676957463368655f68784f7678745478/metadata @@ -0,0 +1 @@ +{"buildId":"77364271716762793947664b3479416b4676642d2f417a333159574d503255743036584e4a3867414c2f4d77616c62673256647a494f4437674265727a472f37625364676957463368655f68784f7678745478","upload":{"id":"my-upload-id","state":"STATE_UPLOADED"},"source":"SOURCE_UPLOAD"} diff --git a/proto/buf.lock b/proto/buf.lock index c0e9a6b04a5..a2b543e3c22 100644 --- a/proto/buf.lock +++ b/proto/buf.lock @@ -4,7 +4,7 @@ deps: - remote: buf.build owner: googleapis repository: googleapis - commit: c0b37eaf6f1f43ecacbc85e4e4d1a440 + commit: faacf837d7304c58b7c9020c7807fa6e - remote: buf.build owner: grpc-ecosystem repository: grpc-gateway diff --git a/proto/parca/debuginfo/v1alpha1/debuginfo.proto b/proto/parca/debuginfo/v1alpha1/debuginfo.proto index 3c6765144fd..48547348952 100644 --- a/proto/parca/debuginfo/v1alpha1/debuginfo.proto +++ b/proto/parca/debuginfo/v1alpha1/debuginfo.proto @@ -2,33 +2,75 @@ syntax = "proto3"; package parca.debuginfo.v1alpha1; -// DebugInfoService is a service that allows storage of debug info -service DebugInfoService { - // Exists returns true if the given build_id has debug info uploaded for it. - rpc Exists(ExistsRequest) returns (ExistsResponse) {} +import "google/protobuf/timestamp.proto"; +// DebuginfoService is a service that allows storage of debug info +service DebuginfoService { // Upload ingests debug info for a given build_id rpc Upload(stream UploadRequest) returns (UploadResponse) {} - // Download returns the debug info for a given build_id. - rpc Download(DownloadRequest) returns (stream DownloadResponse) {} + // ShouldInitiateUpload returns whether an upload for a given build_id should be initiated or not. 
+ rpc ShouldInitiateUpload(ShouldInitiateUploadRequest) returns (ShouldInitiateUploadResponse) {} + + // InitiateUpload returns a strategy and information to upload debug info for a given build_id. + rpc InitiateUpload(InitiateUploadRequest) returns (InitiateUploadResponse) {} + + // MarkUploadFinished marks the upload as finished for a given build_id. + rpc MarkUploadFinished(MarkUploadFinishedRequest) returns (MarkUploadFinishedResponse) {} } -// ExistsRequest request to determine if debug info exists for a given build_id -message ExistsRequest { - // build_id is a unique identifier for the debug data +// ShouldInitiateUploadRequest is the request for ShouldInitiateUpload. +message ShouldInitiateUploadRequest { + // The build_id of the debuginfo. string build_id = 1; +} + +// ShouldInitiateUploadResponse is the response for ShouldInitiateUpload. +message ShouldInitiateUploadResponse { + // Whether an upload should be initiated or not. + bool should_initiate_upload = 1; +} + +// InitiateUploadRequest is the request to initiate an upload. +message InitiateUploadRequest { + // The build_id of the debug info to upload. + string build_id = 1; + // The size of the debug info to upload. + int64 size = 2; +} - // hash is the hash of the debug information file - string hash = 2; +// InitiateUploadResponse is the response to an InitiateUploadRequest. +message InitiateUploadResponse { + // The strategy to use for uploading. + enum UploadStrategy { + // The upload is not allowed. + UPLOAD_STRATEGY_UNSPECIFIED = 0; + // The upload is allowed and should be done via the Upload RPC. + UPLOAD_STRATEGY_GRPC = 1; + // The upload is allowed and should be done via a returned signed URL. + UPLOAD_STRATEGY_SIGNED_URL = 2; + } + + // The upload_id to use for uploading. + string upload_id = 1; + // The strategy to use for uploading. + UploadStrategy upload_strategy = 2; + // The signed url to use for uploading using a PUT request when the upload + // strategy is SIGNED_STRATEGY_URL. 
+ string signed_url = 3; } -// ExistsResponse returns whether the given build_id has debug info -message ExistsResponse { - // exists indicates if there is debug data present for the given build_id - bool exists = 1; +// MarkUploadFinishedRequest is the request to mark an upload as finished. +message MarkUploadFinishedRequest { + // The build_id of the debug info to mark as finished. + string build_id = 1; + // The upload_id of the debug info to mark as finished. + string upload_id = 2; } +// MarkUploadFinishedResponse is the response to a MarkUploadFinishedRequest. +message MarkUploadFinishedResponse {} + // UploadRequest upload debug info message UploadRequest { // data contains either the upload info metadata or the debug info @@ -45,12 +87,8 @@ message UploadRequest { message UploadInfo { // build_id is a unique identifier for the debug data string build_id = 1; - - // hash is the hash of the source file that debug information extracted from - string hash = 2; - - // TODO(kakkoyun): Add SourceHash and use Hash as debuginfo file hash. - // TODO(kakkoyun): Add SourceType enum. + // upload_id is a unique identifier for the upload + string upload_id = 2; } // UploadResponse returns the build_id and the size of the uploaded debug info @@ -62,36 +100,59 @@ message UploadResponse { uint64 size = 2; } -// DownloadRequest upload debug info -message DownloadRequest { - // build_id is a unique identifier for the debug data +// Debuginfo contains metadata about a debuginfo file. +message Debuginfo { + // BuildID is the build ID of the debuginfo. string build_id = 1; -} - -// DownloadRequest returns chunked data of the debuginfo. -message DownloadResponse { - // data contains either the upload info metadata or the debug info - oneof data { - // info is the metadata for the debug info - DownloadInfo info = 1; - // chunk_data is the raw bytes of the debug info - bytes chunk_data = 2; - } -} - -// DownloadInfo metadata for the debug data that is being downloaded. 
-message DownloadInfo { - // Source enum describes the source a debuginfo is from. + // Source is the source of the debuginfo. enum Source { // To understand when no source is set we have the unknown source. SOURCE_UNKNOWN_UNSPECIFIED = 0; // The debuginfo was uploaded by a user/agent. SOURCE_UPLOAD = 1; - // The debuginfo was downloaded from a public debuginfod server. + // The debuginfo is available from the configured debuginfod server(s). SOURCE_DEBUGINFOD = 2; } - // Source indicates the origin of the debuginfo being downloaded. - Source source = 1; + // Source is the source of the debuginfo. + Source source = 2; + + // DebuginfoUpload is the debuginfo upload metadata. + DebuginfoUpload upload = 3; + + // Quality is the quality of the debuginfo. This is set asynchonously by the + // symbolizer when the debuginfo is actually used. + DebuginfoQuality quality = 4; +} + +// DebuginfoUpload contains metadata about a debuginfo upload. +message DebuginfoUpload { + // UploadID is the ID of the debuginfo upload. + string id = 1; + + // The state of the debuginfo upload. + enum State { + // To understand when no upload state is set we have the unknown state. + STATE_UNKNOWN_UNSPECIFIED = 0; + // The debuginfo is currently being uploaded. + STATE_UPLOADING = 1; + // The debuginfo has been uploaded successfully. + STATE_UPLOADED = 2; + } + + // State is the current state of the debuginfo upload. + State state = 2; + + // StartedAt is the time the debuginfo upload was started. + google.protobuf.Timestamp started_at = 4; + + // FinishedAt is the time the debuginfo upload was finished. + google.protobuf.Timestamp finished_at = 5; +} + +// DebuginfoQuality is the quality of the debuginfo. +message DebuginfoQuality { + // The debuginfo file is not a valid ELF file. 
+ bool not_valid_elf = 1; } diff --git a/proto/parca/profilestore/v1alpha1/profilestore.proto b/proto/parca/profilestore/v1alpha1/profilestore.proto index d50f5d9bf13..278b0e4f35d 100644 --- a/proto/parca/profilestore/v1alpha1/profilestore.proto +++ b/proto/parca/profilestore/v1alpha1/profilestore.proto @@ -66,7 +66,9 @@ message RawSample { service AgentsService { // Agents return the agents that pushed data to the server rpc Agents(AgentsRequest) returns (AgentsResponse) { - option (google.api.http) = {get: "/agents"}; + option (google.api.http) = { + get: "/agents" + }; } } diff --git a/proto/parca/query/v1alpha1/query.proto b/proto/parca/query/v1alpha1/query.proto index bc2021d72a0..0517544ef83 100644 --- a/proto/parca/query/v1alpha1/query.proto +++ b/proto/parca/query/v1alpha1/query.proto @@ -11,32 +11,44 @@ import "parca/profilestore/v1alpha1/profilestore.proto"; service QueryService { // QueryRange performs a profile query over a time range rpc QueryRange(QueryRangeRequest) returns (QueryRangeResponse) { - option (google.api.http) = {get: "/profiles/query_range"}; + option (google.api.http) = { + get: "/profiles/query_range" + }; } // Query performs a profile query rpc Query(QueryRequest) returns (QueryResponse) { - option (google.api.http) = {get: "/profiles/query"}; + option (google.api.http) = { + get: "/profiles/query" + }; } // Series is unimplemented rpc Series(SeriesRequest) returns (SeriesResponse) { - option (google.api.http) = {get: "/profiles/series"}; + option (google.api.http) = { + get: "/profiles/series" + }; } // ProfileTypes returns the list of available profile types. 
rpc ProfileTypes(ProfileTypesRequest) returns (ProfileTypesResponse) { - option (google.api.http) = {get: "/profiles/types"}; + option (google.api.http) = { + get: "/profiles/types" + }; } // Labels returns the set of label names against a given matching string and time frame rpc Labels(LabelsRequest) returns (LabelsResponse) { - option (google.api.http) = {get: "/profiles/labels"}; + option (google.api.http) = { + get: "/profiles/labels" + }; } // Values returns the set of values that match a given label and time frame rpc Values(ValuesRequest) returns (ValuesResponse) { - option (google.api.http) = {get: "/profiles/labels/{label_name}/values"}; + option (google.api.http) = { + get: "/profiles/labels/{label_name}/values" + }; } // ShareProfile uploads the given profile to pprof.me and returns a link to the profile. diff --git a/proto/parca/scrape/v1alpha1/scrape.proto b/proto/parca/scrape/v1alpha1/scrape.proto index 3abe34e73a8..2bccd04d9d8 100644 --- a/proto/parca/scrape/v1alpha1/scrape.proto +++ b/proto/parca/scrape/v1alpha1/scrape.proto @@ -11,7 +11,9 @@ import "parca/profilestore/v1alpha1/profilestore.proto"; service ScrapeService { // Targets returns the set of scrape targets that are configured rpc Targets(TargetsRequest) returns (TargetsResponse) { - option (google.api.http) = {get: "/targets"}; + option (google.api.http) = { + get: "/targets" + }; } } diff --git a/ui/packages/shared/client/src/parca/debuginfo/v1alpha1/debuginfo.client.ts b/ui/packages/shared/client/src/parca/debuginfo/v1alpha1/debuginfo.client.ts index c6c5dff548f..ffc90b5f803 100644 --- a/ui/packages/shared/client/src/parca/debuginfo/v1alpha1/debuginfo.client.ts +++ b/ui/packages/shared/client/src/parca/debuginfo/v1alpha1/debuginfo.client.ts @@ -3,30 +3,25 @@ // tslint:disable import type { RpcTransport } from "@protobuf-ts/runtime-rpc"; import type { ServiceInfo } from "@protobuf-ts/runtime-rpc"; -import { DebugInfoService } from "./debuginfo"; -import type { DownloadResponse } from 
"./debuginfo"; -import type { DownloadRequest } from "./debuginfo"; -import type { ServerStreamingCall } from "@protobuf-ts/runtime-rpc"; +import { DebuginfoService } from "./debuginfo"; +import type { MarkUploadFinishedResponse } from "./debuginfo"; +import type { MarkUploadFinishedRequest } from "./debuginfo"; +import type { InitiateUploadResponse } from "./debuginfo"; +import type { InitiateUploadRequest } from "./debuginfo"; +import type { ShouldInitiateUploadResponse } from "./debuginfo"; +import type { ShouldInitiateUploadRequest } from "./debuginfo"; +import type { UnaryCall } from "@protobuf-ts/runtime-rpc"; +import { stackIntercept } from "@protobuf-ts/runtime-rpc"; import type { UploadResponse } from "./debuginfo"; import type { UploadRequest } from "./debuginfo"; import type { ClientStreamingCall } from "@protobuf-ts/runtime-rpc"; -import { stackIntercept } from "@protobuf-ts/runtime-rpc"; -import type { ExistsResponse } from "./debuginfo"; -import type { ExistsRequest } from "./debuginfo"; -import type { UnaryCall } from "@protobuf-ts/runtime-rpc"; import type { RpcOptions } from "@protobuf-ts/runtime-rpc"; /** - * DebugInfoService is a service that allows storage of debug info + * DebuginfoService is a service that allows storage of debug info * - * @generated from protobuf service parca.debuginfo.v1alpha1.DebugInfoService + * @generated from protobuf service parca.debuginfo.v1alpha1.DebuginfoService */ -export interface IDebugInfoServiceClient { - /** - * Exists returns true if the given build_id has debug info uploaded for it. 
- * - * @generated from protobuf rpc: Exists(parca.debuginfo.v1alpha1.ExistsRequest) returns (parca.debuginfo.v1alpha1.ExistsResponse); - */ - exists(input: ExistsRequest, options?: RpcOptions): UnaryCall; +export interface IDebuginfoServiceClient { /** * Upload ingests debug info for a given build_id * @@ -34,48 +29,69 @@ export interface IDebugInfoServiceClient { */ upload(options?: RpcOptions): ClientStreamingCall; /** - * Download returns the debug info for a given build_id. + * ShouldInitiateUpload returns whether an upload for a given build_id should be initiated or not. + * + * @generated from protobuf rpc: ShouldInitiateUpload(parca.debuginfo.v1alpha1.ShouldInitiateUploadRequest) returns (parca.debuginfo.v1alpha1.ShouldInitiateUploadResponse); + */ + shouldInitiateUpload(input: ShouldInitiateUploadRequest, options?: RpcOptions): UnaryCall; + /** + * InitiateUpload returns a strategy and information to upload debug info for a given build_id. + * + * @generated from protobuf rpc: InitiateUpload(parca.debuginfo.v1alpha1.InitiateUploadRequest) returns (parca.debuginfo.v1alpha1.InitiateUploadResponse); + */ + initiateUpload(input: InitiateUploadRequest, options?: RpcOptions): UnaryCall; + /** + * MarkUploadFinished marks the upload as finished for a given build_id. 
* - * @generated from protobuf rpc: Download(parca.debuginfo.v1alpha1.DownloadRequest) returns (stream parca.debuginfo.v1alpha1.DownloadResponse); + * @generated from protobuf rpc: MarkUploadFinished(parca.debuginfo.v1alpha1.MarkUploadFinishedRequest) returns (parca.debuginfo.v1alpha1.MarkUploadFinishedResponse); */ - download(input: DownloadRequest, options?: RpcOptions): ServerStreamingCall; + markUploadFinished(input: MarkUploadFinishedRequest, options?: RpcOptions): UnaryCall; } /** - * DebugInfoService is a service that allows storage of debug info + * DebuginfoService is a service that allows storage of debug info * - * @generated from protobuf service parca.debuginfo.v1alpha1.DebugInfoService + * @generated from protobuf service parca.debuginfo.v1alpha1.DebuginfoService */ -export class DebugInfoServiceClient implements IDebugInfoServiceClient, ServiceInfo { - typeName = DebugInfoService.typeName; - methods = DebugInfoService.methods; - options = DebugInfoService.options; +export class DebuginfoServiceClient implements IDebuginfoServiceClient, ServiceInfo { + typeName = DebuginfoService.typeName; + methods = DebuginfoService.methods; + options = DebuginfoService.options; constructor(private readonly _transport: RpcTransport) { } /** - * Exists returns true if the given build_id has debug info uploaded for it. 
+ * Upload ingests debug info for a given build_id * - * @generated from protobuf rpc: Exists(parca.debuginfo.v1alpha1.ExistsRequest) returns (parca.debuginfo.v1alpha1.ExistsResponse); + * @generated from protobuf rpc: Upload(stream parca.debuginfo.v1alpha1.UploadRequest) returns (parca.debuginfo.v1alpha1.UploadResponse); */ - exists(input: ExistsRequest, options?: RpcOptions): UnaryCall { + upload(options?: RpcOptions): ClientStreamingCall { const method = this.methods[0], opt = this._transport.mergeOptions(options); - return stackIntercept("unary", this._transport, method, opt, input); + return stackIntercept("clientStreaming", this._transport, method, opt); } /** - * Upload ingests debug info for a given build_id + * ShouldInitiateUpload returns whether an upload for a given build_id should be initiated or not. * - * @generated from protobuf rpc: Upload(stream parca.debuginfo.v1alpha1.UploadRequest) returns (parca.debuginfo.v1alpha1.UploadResponse); + * @generated from protobuf rpc: ShouldInitiateUpload(parca.debuginfo.v1alpha1.ShouldInitiateUploadRequest) returns (parca.debuginfo.v1alpha1.ShouldInitiateUploadResponse); */ - upload(options?: RpcOptions): ClientStreamingCall { + shouldInitiateUpload(input: ShouldInitiateUploadRequest, options?: RpcOptions): UnaryCall { const method = this.methods[1], opt = this._transport.mergeOptions(options); - return stackIntercept("clientStreaming", this._transport, method, opt); + return stackIntercept("unary", this._transport, method, opt, input); } /** - * Download returns the debug info for a given build_id. + * InitiateUpload returns a strategy and information to upload debug info for a given build_id. 
* - * @generated from protobuf rpc: Download(parca.debuginfo.v1alpha1.DownloadRequest) returns (stream parca.debuginfo.v1alpha1.DownloadResponse); + * @generated from protobuf rpc: InitiateUpload(parca.debuginfo.v1alpha1.InitiateUploadRequest) returns (parca.debuginfo.v1alpha1.InitiateUploadResponse); */ - download(input: DownloadRequest, options?: RpcOptions): ServerStreamingCall { + initiateUpload(input: InitiateUploadRequest, options?: RpcOptions): UnaryCall { const method = this.methods[2], opt = this._transport.mergeOptions(options); - return stackIntercept("serverStreaming", this._transport, method, opt, input); + return stackIntercept("unary", this._transport, method, opt, input); + } + /** + * MarkUploadFinished marks the upload as finished for a given build_id. + * + * @generated from protobuf rpc: MarkUploadFinished(parca.debuginfo.v1alpha1.MarkUploadFinishedRequest) returns (parca.debuginfo.v1alpha1.MarkUploadFinishedResponse); + */ + markUploadFinished(input: MarkUploadFinishedRequest, options?: RpcOptions): UnaryCall { + const method = this.methods[3], opt = this._transport.mergeOptions(options); + return stackIntercept("unary", this._transport, method, opt, input); } } diff --git a/ui/packages/shared/client/src/parca/debuginfo/v1alpha1/debuginfo.ts b/ui/packages/shared/client/src/parca/debuginfo/v1alpha1/debuginfo.ts index a63fad3758d..76e22e7ab3d 100644 --- a/ui/packages/shared/client/src/parca/debuginfo/v1alpha1/debuginfo.ts +++ b/ui/packages/shared/client/src/parca/debuginfo/v1alpha1/debuginfo.ts @@ -12,37 +12,128 @@ import type { PartialMessage } from "@protobuf-ts/runtime"; import { reflectionMergePartial } from "@protobuf-ts/runtime"; import { MESSAGE_TYPE } from "@protobuf-ts/runtime"; import { MessageType } from "@protobuf-ts/runtime"; +import { Timestamp } from "../../../google/protobuf/timestamp"; /** - * ExistsRequest request to determine if debug info exists for a given build_id + * ShouldInitiateUploadRequest is the request for 
ShouldInitiateUpload. * - * @generated from protobuf message parca.debuginfo.v1alpha1.ExistsRequest + * @generated from protobuf message parca.debuginfo.v1alpha1.ShouldInitiateUploadRequest */ -export interface ExistsRequest { +export interface ShouldInitiateUploadRequest { /** - * build_id is a unique identifier for the debug data + * The build_id of the debuginfo. * * @generated from protobuf field: string build_id = 1; */ buildId: string; +} +/** + * ShouldInitiateUploadResponse is the response for ShouldInitiateUpload. + * + * @generated from protobuf message parca.debuginfo.v1alpha1.ShouldInitiateUploadResponse + */ +export interface ShouldInitiateUploadResponse { /** - * hash is the hash of the debug information file + * Whether an upload should be initiated or not. * - * @generated from protobuf field: string hash = 2; + * @generated from protobuf field: bool should_initiate_upload = 1; */ - hash: string; + shouldInitiateUpload: boolean; } /** - * ExistsResponse returns whether the given build_id has debug info + * InitiateUploadRequest is the request to initiate an upload. * - * @generated from protobuf message parca.debuginfo.v1alpha1.ExistsResponse + * @generated from protobuf message parca.debuginfo.v1alpha1.InitiateUploadRequest */ -export interface ExistsResponse { +export interface InitiateUploadRequest { + /** + * The build_id of the debug info to upload. + * + * @generated from protobuf field: string build_id = 1; + */ + buildId: string; /** - * exists indicates if there is debug data present for the given build_id + * The size of the debug info to upload. * - * @generated from protobuf field: bool exists = 1; + * @generated from protobuf field: int64 size = 2; */ - exists: boolean; + size: string; +} +/** + * InitiateUploadResponse is the response to an InitiateUploadRequest. 
+ * + * @generated from protobuf message parca.debuginfo.v1alpha1.InitiateUploadResponse + */ +export interface InitiateUploadResponse { + /** + * The upload_id to use for uploading. + * + * @generated from protobuf field: string upload_id = 1; + */ + uploadId: string; + /** + * The strategy to use for uploading. + * + * @generated from protobuf field: parca.debuginfo.v1alpha1.InitiateUploadResponse.UploadStrategy upload_strategy = 2; + */ + uploadStrategy: InitiateUploadResponse_UploadStrategy; + /** + * The signed url to use for uploading using a PUT request when the upload + * strategy is SIGNED_STRATEGY_URL. + * + * @generated from protobuf field: string signed_url = 3; + */ + signedUrl: string; +} +/** + * The strategy to use for uploading. + * + * @generated from protobuf enum parca.debuginfo.v1alpha1.InitiateUploadResponse.UploadStrategy + */ +export enum InitiateUploadResponse_UploadStrategy { + /** + * The upload is not allowed. + * + * @generated from protobuf enum value: UPLOAD_STRATEGY_UNSPECIFIED = 0; + */ + UNSPECIFIED = 0, + /** + * The upload is allowed and should be done via the Upload RPC. + * + * @generated from protobuf enum value: UPLOAD_STRATEGY_GRPC = 1; + */ + GRPC = 1, + /** + * The upload is allowed and should be done via a returned signed URL. + * + * @generated from protobuf enum value: UPLOAD_STRATEGY_SIGNED_URL = 2; + */ + SIGNED_URL = 2 +} +/** + * MarkUploadFinishedRequest is the request to mark an upload as finished. + * + * @generated from protobuf message parca.debuginfo.v1alpha1.MarkUploadFinishedRequest + */ +export interface MarkUploadFinishedRequest { + /** + * The build_id of the debug info to mark as finished. + * + * @generated from protobuf field: string build_id = 1; + */ + buildId: string; + /** + * The upload_id of the debug info to mark as finished. 
+ * + * @generated from protobuf field: string upload_id = 2; + */ + uploadId: string; +} +/** + * MarkUploadFinishedResponse is the response to a MarkUploadFinishedRequest. + * + * @generated from protobuf message parca.debuginfo.v1alpha1.MarkUploadFinishedResponse + */ +export interface MarkUploadFinishedResponse { } /** * UploadRequest upload debug info @@ -86,11 +177,11 @@ export interface UploadInfo { */ buildId: string; /** - * hash is the hash of the source file that debug information extracted from + * upload_id is a unique identifier for the upload * - * @generated from protobuf field: string hash = 2; + * @generated from protobuf field: string upload_id = 2; */ - hash: string; + uploadId: string; } /** * UploadResponse returns the build_id and the size of the uploaded debug info @@ -112,101 +203,241 @@ export interface UploadResponse { size: string; } /** - * DownloadRequest upload debug info + * Debuginfo contains metadata about a debuginfo file. * - * @generated from protobuf message parca.debuginfo.v1alpha1.DownloadRequest + * @generated from protobuf message parca.debuginfo.v1alpha1.Debuginfo */ -export interface DownloadRequest { +export interface Debuginfo { /** - * build_id is a unique identifier for the debug data + * BuildID is the build ID of the debuginfo. * * @generated from protobuf field: string build_id = 1; */ buildId: string; + /** + * Source is the source of the debuginfo. + * + * @generated from protobuf field: parca.debuginfo.v1alpha1.Debuginfo.Source source = 2; + */ + source: Debuginfo_Source; + /** + * DebuginfoUpload is the debuginfo upload metadata. + * + * @generated from protobuf field: parca.debuginfo.v1alpha1.DebuginfoUpload upload = 3; + */ + upload?: DebuginfoUpload; + /** + * Quality is the quality of the debuginfo. This is set asynchonously by the + * symbolizer when the debuginfo is actually used. 
+ * + * @generated from protobuf field: parca.debuginfo.v1alpha1.DebuginfoQuality quality = 4; + */ + quality?: DebuginfoQuality; } /** - * DownloadRequest returns chunked data of the debuginfo. + * Source is the source of the debuginfo. * - * @generated from protobuf message parca.debuginfo.v1alpha1.DownloadResponse + * @generated from protobuf enum parca.debuginfo.v1alpha1.Debuginfo.Source */ -export interface DownloadResponse { +export enum Debuginfo_Source { /** - * @generated from protobuf oneof: data + * To understand when no source is set we have the unknown source. + * + * @generated from protobuf enum value: SOURCE_UNKNOWN_UNSPECIFIED = 0; */ - data: { - oneofKind: "info"; - /** - * info is the metadata for the debug info - * - * @generated from protobuf field: parca.debuginfo.v1alpha1.DownloadInfo info = 1; - */ - info: DownloadInfo; - } | { - oneofKind: "chunkData"; - /** - * chunk_data is the raw bytes of the debug info - * - * @generated from protobuf field: bytes chunk_data = 2; - */ - chunkData: Uint8Array; - } | { - oneofKind: undefined; - }; + UNKNOWN_UNSPECIFIED = 0, + /** + * The debuginfo was uploaded by a user/agent. + * + * @generated from protobuf enum value: SOURCE_UPLOAD = 1; + */ + UPLOAD = 1, + /** + * The debuginfo is available from the configured debuginfod server(s). + * + * @generated from protobuf enum value: SOURCE_DEBUGINFOD = 2; + */ + DEBUGINFOD = 2 } /** - * DownloadInfo metadata for the debug data that is being downloaded. + * DebuginfoUpload contains metadata about a debuginfo upload. * - * @generated from protobuf message parca.debuginfo.v1alpha1.DownloadInfo + * @generated from protobuf message parca.debuginfo.v1alpha1.DebuginfoUpload */ -export interface DownloadInfo { +export interface DebuginfoUpload { /** - * Source indicates the origin of the debuginfo being downloaded. + * UploadID is the ID of the debuginfo upload. 
* - * @generated from protobuf field: parca.debuginfo.v1alpha1.DownloadInfo.Source source = 1; + * @generated from protobuf field: string id = 1; */ - source: DownloadInfo_Source; + id: string; + /** + * State is the current state of the debuginfo upload. + * + * @generated from protobuf field: parca.debuginfo.v1alpha1.DebuginfoUpload.State state = 2; + */ + state: DebuginfoUpload_State; + /** + * StartedAt is the time the debuginfo upload was started. + * + * @generated from protobuf field: google.protobuf.Timestamp started_at = 4; + */ + startedAt?: Timestamp; + /** + * FinishedAt is the time the debuginfo upload was finished. + * + * @generated from protobuf field: google.protobuf.Timestamp finished_at = 5; + */ + finishedAt?: Timestamp; } /** - * Source enum describes the source a debuginfo is from. + * The state of the debuginfo upload. * - * @generated from protobuf enum parca.debuginfo.v1alpha1.DownloadInfo.Source + * @generated from protobuf enum parca.debuginfo.v1alpha1.DebuginfoUpload.State */ -export enum DownloadInfo_Source { +export enum DebuginfoUpload_State { /** - * To understand when no source is set we have the unknown source. + * To understand when no upload state is set we have the unknown state. * - * @generated from protobuf enum value: SOURCE_UNKNOWN_UNSPECIFIED = 0; + * @generated from protobuf enum value: STATE_UNKNOWN_UNSPECIFIED = 0; */ UNKNOWN_UNSPECIFIED = 0, /** - * The debuginfo was uploaded by a user/agent. + * The debuginfo is currently being uploaded. * - * @generated from protobuf enum value: SOURCE_UPLOAD = 1; + * @generated from protobuf enum value: STATE_UPLOADING = 1; */ - UPLOAD = 1, + UPLOADING = 1, /** - * The debuginfo was downloaded from a public debuginfod server. + * The debuginfo has been uploaded successfully. 
* - * @generated from protobuf enum value: SOURCE_DEBUGINFOD = 2; + * @generated from protobuf enum value: STATE_UPLOADED = 2; */ - DEBUGINFOD = 2 + UPLOADED = 2 +} +/** + * DebuginfoQuality is the quality of the debuginfo. + * + * @generated from protobuf message parca.debuginfo.v1alpha1.DebuginfoQuality + */ +export interface DebuginfoQuality { + /** + * The debuginfo file is not a valid ELF file. + * + * @generated from protobuf field: bool not_valid_elf = 1; + */ + notValidElf: boolean; } // @generated message type with reflection information, may provide speed optimized methods -class ExistsRequest$Type extends MessageType { +class ShouldInitiateUploadRequest$Type extends MessageType { constructor() { - super("parca.debuginfo.v1alpha1.ExistsRequest", [ + super("parca.debuginfo.v1alpha1.ShouldInitiateUploadRequest", [ + { no: 1, name: "build_id", kind: "scalar", T: 9 /*ScalarType.STRING*/ } + ]); + } + create(value?: PartialMessage): ShouldInitiateUploadRequest { + const message = { buildId: "" }; + globalThis.Object.defineProperty(message, MESSAGE_TYPE, { enumerable: false, value: this }); + if (value !== undefined) + reflectionMergePartial(this, message, value); + return message; + } + internalBinaryRead(reader: IBinaryReader, length: number, options: BinaryReadOptions, target?: ShouldInitiateUploadRequest): ShouldInitiateUploadRequest { + let message = target ?? this.create(), end = reader.pos + length; + while (reader.pos < end) { + let [fieldNo, wireType] = reader.tag(); + switch (fieldNo) { + case /* string build_id */ 1: + message.buildId = reader.string(); + break; + default: + let u = options.readUnknownField; + if (u === "throw") + throw new globalThis.Error(`Unknown field ${fieldNo} (wire type ${wireType}) for ${this.typeName}`); + let d = reader.skip(wireType); + if (u !== false) + (u === true ? 
UnknownFieldHandler.onRead : u)(this.typeName, message, fieldNo, wireType, d); + } + } + return message; + } + internalBinaryWrite(message: ShouldInitiateUploadRequest, writer: IBinaryWriter, options: BinaryWriteOptions): IBinaryWriter { + /* string build_id = 1; */ + if (message.buildId !== "") + writer.tag(1, WireType.LengthDelimited).string(message.buildId); + let u = options.writeUnknownFields; + if (u !== false) + (u == true ? UnknownFieldHandler.onWrite : u)(this.typeName, message, writer); + return writer; + } +} +/** + * @generated MessageType for protobuf message parca.debuginfo.v1alpha1.ShouldInitiateUploadRequest + */ +export const ShouldInitiateUploadRequest = new ShouldInitiateUploadRequest$Type(); +// @generated message type with reflection information, may provide speed optimized methods +class ShouldInitiateUploadResponse$Type extends MessageType { + constructor() { + super("parca.debuginfo.v1alpha1.ShouldInitiateUploadResponse", [ + { no: 1, name: "should_initiate_upload", kind: "scalar", T: 8 /*ScalarType.BOOL*/ } + ]); + } + create(value?: PartialMessage): ShouldInitiateUploadResponse { + const message = { shouldInitiateUpload: false }; + globalThis.Object.defineProperty(message, MESSAGE_TYPE, { enumerable: false, value: this }); + if (value !== undefined) + reflectionMergePartial(this, message, value); + return message; + } + internalBinaryRead(reader: IBinaryReader, length: number, options: BinaryReadOptions, target?: ShouldInitiateUploadResponse): ShouldInitiateUploadResponse { + let message = target ?? 
this.create(), end = reader.pos + length; + while (reader.pos < end) { + let [fieldNo, wireType] = reader.tag(); + switch (fieldNo) { + case /* bool should_initiate_upload */ 1: + message.shouldInitiateUpload = reader.bool(); + break; + default: + let u = options.readUnknownField; + if (u === "throw") + throw new globalThis.Error(`Unknown field ${fieldNo} (wire type ${wireType}) for ${this.typeName}`); + let d = reader.skip(wireType); + if (u !== false) + (u === true ? UnknownFieldHandler.onRead : u)(this.typeName, message, fieldNo, wireType, d); + } + } + return message; + } + internalBinaryWrite(message: ShouldInitiateUploadResponse, writer: IBinaryWriter, options: BinaryWriteOptions): IBinaryWriter { + /* bool should_initiate_upload = 1; */ + if (message.shouldInitiateUpload !== false) + writer.tag(1, WireType.Varint).bool(message.shouldInitiateUpload); + let u = options.writeUnknownFields; + if (u !== false) + (u == true ? UnknownFieldHandler.onWrite : u)(this.typeName, message, writer); + return writer; + } +} +/** + * @generated MessageType for protobuf message parca.debuginfo.v1alpha1.ShouldInitiateUploadResponse + */ +export const ShouldInitiateUploadResponse = new ShouldInitiateUploadResponse$Type(); +// @generated message type with reflection information, may provide speed optimized methods +class InitiateUploadRequest$Type extends MessageType { + constructor() { + super("parca.debuginfo.v1alpha1.InitiateUploadRequest", [ { no: 1, name: "build_id", kind: "scalar", T: 9 /*ScalarType.STRING*/ }, - { no: 2, name: "hash", kind: "scalar", T: 9 /*ScalarType.STRING*/ } + { no: 2, name: "size", kind: "scalar", T: 3 /*ScalarType.INT64*/ } ]); } - create(value?: PartialMessage): ExistsRequest { - const message = { buildId: "", hash: "" }; + create(value?: PartialMessage): InitiateUploadRequest { + const message = { buildId: "", size: "0" }; globalThis.Object.defineProperty(message, MESSAGE_TYPE, { enumerable: false, value: this }); if (value !== undefined) - 
reflectionMergePartial(this, message, value); + reflectionMergePartial(this, message, value); return message; } - internalBinaryRead(reader: IBinaryReader, length: number, options: BinaryReadOptions, target?: ExistsRequest): ExistsRequest { + internalBinaryRead(reader: IBinaryReader, length: number, options: BinaryReadOptions, target?: InitiateUploadRequest): InitiateUploadRequest { let message = target ?? this.create(), end = reader.pos + length; while (reader.pos < end) { let [fieldNo, wireType] = reader.tag(); @@ -214,8 +445,8 @@ class ExistsRequest$Type extends MessageType { case /* string build_id */ 1: message.buildId = reader.string(); break; - case /* string hash */ 2: - message.hash = reader.string(); + case /* int64 size */ 2: + message.size = reader.int64().toString(); break; default: let u = options.readUnknownField; @@ -228,13 +459,13 @@ class ExistsRequest$Type extends MessageType { } return message; } - internalBinaryWrite(message: ExistsRequest, writer: IBinaryWriter, options: BinaryWriteOptions): IBinaryWriter { + internalBinaryWrite(message: InitiateUploadRequest, writer: IBinaryWriter, options: BinaryWriteOptions): IBinaryWriter { /* string build_id = 1; */ if (message.buildId !== "") writer.tag(1, WireType.LengthDelimited).string(message.buildId); - /* string hash = 2; */ - if (message.hash !== "") - writer.tag(2, WireType.LengthDelimited).string(message.hash); + /* int64 size = 2; */ + if (message.size !== "0") + writer.tag(2, WireType.Varint).int64(message.size); let u = options.writeUnknownFields; if (u !== false) (u == true ? 
UnknownFieldHandler.onWrite : u)(this.typeName, message, writer); @@ -242,30 +473,38 @@ class ExistsRequest$Type extends MessageType { } } /** - * @generated MessageType for protobuf message parca.debuginfo.v1alpha1.ExistsRequest + * @generated MessageType for protobuf message parca.debuginfo.v1alpha1.InitiateUploadRequest */ -export const ExistsRequest = new ExistsRequest$Type(); +export const InitiateUploadRequest = new InitiateUploadRequest$Type(); // @generated message type with reflection information, may provide speed optimized methods -class ExistsResponse$Type extends MessageType { +class InitiateUploadResponse$Type extends MessageType { constructor() { - super("parca.debuginfo.v1alpha1.ExistsResponse", [ - { no: 1, name: "exists", kind: "scalar", T: 8 /*ScalarType.BOOL*/ } + super("parca.debuginfo.v1alpha1.InitiateUploadResponse", [ + { no: 1, name: "upload_id", kind: "scalar", T: 9 /*ScalarType.STRING*/ }, + { no: 2, name: "upload_strategy", kind: "enum", T: () => ["parca.debuginfo.v1alpha1.InitiateUploadResponse.UploadStrategy", InitiateUploadResponse_UploadStrategy, "UPLOAD_STRATEGY_"] }, + { no: 3, name: "signed_url", kind: "scalar", T: 9 /*ScalarType.STRING*/ } ]); } - create(value?: PartialMessage): ExistsResponse { - const message = { exists: false }; + create(value?: PartialMessage): InitiateUploadResponse { + const message = { uploadId: "", uploadStrategy: 0, signedUrl: "" }; globalThis.Object.defineProperty(message, MESSAGE_TYPE, { enumerable: false, value: this }); if (value !== undefined) - reflectionMergePartial(this, message, value); + reflectionMergePartial(this, message, value); return message; } - internalBinaryRead(reader: IBinaryReader, length: number, options: BinaryReadOptions, target?: ExistsResponse): ExistsResponse { + internalBinaryRead(reader: IBinaryReader, length: number, options: BinaryReadOptions, target?: InitiateUploadResponse): InitiateUploadResponse { let message = target ?? 
this.create(), end = reader.pos + length; while (reader.pos < end) { let [fieldNo, wireType] = reader.tag(); switch (fieldNo) { - case /* bool exists */ 1: - message.exists = reader.bool(); + case /* string upload_id */ 1: + message.uploadId = reader.string(); + break; + case /* parca.debuginfo.v1alpha1.InitiateUploadResponse.UploadStrategy upload_strategy */ 2: + message.uploadStrategy = reader.int32(); + break; + case /* string signed_url */ 3: + message.signedUrl = reader.string(); break; default: let u = options.readUnknownField; @@ -278,10 +517,16 @@ class ExistsResponse$Type extends MessageType { } return message; } - internalBinaryWrite(message: ExistsResponse, writer: IBinaryWriter, options: BinaryWriteOptions): IBinaryWriter { - /* bool exists = 1; */ - if (message.exists !== false) - writer.tag(1, WireType.Varint).bool(message.exists); + internalBinaryWrite(message: InitiateUploadResponse, writer: IBinaryWriter, options: BinaryWriteOptions): IBinaryWriter { + /* string upload_id = 1; */ + if (message.uploadId !== "") + writer.tag(1, WireType.LengthDelimited).string(message.uploadId); + /* parca.debuginfo.v1alpha1.InitiateUploadResponse.UploadStrategy upload_strategy = 2; */ + if (message.uploadStrategy !== 0) + writer.tag(2, WireType.Varint).int32(message.uploadStrategy); + /* string signed_url = 3; */ + if (message.signedUrl !== "") + writer.tag(3, WireType.LengthDelimited).string(message.signedUrl); let u = options.writeUnknownFields; if (u !== false) (u == true ? 
UnknownFieldHandler.onWrite : u)(this.typeName, message, writer); @@ -289,9 +534,89 @@ class ExistsResponse$Type extends MessageType { } } /** - * @generated MessageType for protobuf message parca.debuginfo.v1alpha1.ExistsResponse + * @generated MessageType for protobuf message parca.debuginfo.v1alpha1.InitiateUploadResponse */ -export const ExistsResponse = new ExistsResponse$Type(); +export const InitiateUploadResponse = new InitiateUploadResponse$Type(); +// @generated message type with reflection information, may provide speed optimized methods +class MarkUploadFinishedRequest$Type extends MessageType { + constructor() { + super("parca.debuginfo.v1alpha1.MarkUploadFinishedRequest", [ + { no: 1, name: "build_id", kind: "scalar", T: 9 /*ScalarType.STRING*/ }, + { no: 2, name: "upload_id", kind: "scalar", T: 9 /*ScalarType.STRING*/ } + ]); + } + create(value?: PartialMessage): MarkUploadFinishedRequest { + const message = { buildId: "", uploadId: "" }; + globalThis.Object.defineProperty(message, MESSAGE_TYPE, { enumerable: false, value: this }); + if (value !== undefined) + reflectionMergePartial(this, message, value); + return message; + } + internalBinaryRead(reader: IBinaryReader, length: number, options: BinaryReadOptions, target?: MarkUploadFinishedRequest): MarkUploadFinishedRequest { + let message = target ?? this.create(), end = reader.pos + length; + while (reader.pos < end) { + let [fieldNo, wireType] = reader.tag(); + switch (fieldNo) { + case /* string build_id */ 1: + message.buildId = reader.string(); + break; + case /* string upload_id */ 2: + message.uploadId = reader.string(); + break; + default: + let u = options.readUnknownField; + if (u === "throw") + throw new globalThis.Error(`Unknown field ${fieldNo} (wire type ${wireType}) for ${this.typeName}`); + let d = reader.skip(wireType); + if (u !== false) + (u === true ? 
UnknownFieldHandler.onRead : u)(this.typeName, message, fieldNo, wireType, d); + } + } + return message; + } + internalBinaryWrite(message: MarkUploadFinishedRequest, writer: IBinaryWriter, options: BinaryWriteOptions): IBinaryWriter { + /* string build_id = 1; */ + if (message.buildId !== "") + writer.tag(1, WireType.LengthDelimited).string(message.buildId); + /* string upload_id = 2; */ + if (message.uploadId !== "") + writer.tag(2, WireType.LengthDelimited).string(message.uploadId); + let u = options.writeUnknownFields; + if (u !== false) + (u == true ? UnknownFieldHandler.onWrite : u)(this.typeName, message, writer); + return writer; + } +} +/** + * @generated MessageType for protobuf message parca.debuginfo.v1alpha1.MarkUploadFinishedRequest + */ +export const MarkUploadFinishedRequest = new MarkUploadFinishedRequest$Type(); +// @generated message type with reflection information, may provide speed optimized methods +class MarkUploadFinishedResponse$Type extends MessageType { + constructor() { + super("parca.debuginfo.v1alpha1.MarkUploadFinishedResponse", []); + } + create(value?: PartialMessage): MarkUploadFinishedResponse { + const message = {}; + globalThis.Object.defineProperty(message, MESSAGE_TYPE, { enumerable: false, value: this }); + if (value !== undefined) + reflectionMergePartial(this, message, value); + return message; + } + internalBinaryRead(reader: IBinaryReader, length: number, options: BinaryReadOptions, target?: MarkUploadFinishedResponse): MarkUploadFinishedResponse { + return target ?? this.create(); + } + internalBinaryWrite(message: MarkUploadFinishedResponse, writer: IBinaryWriter, options: BinaryWriteOptions): IBinaryWriter { + let u = options.writeUnknownFields; + if (u !== false) + (u == true ? 
UnknownFieldHandler.onWrite : u)(this.typeName, message, writer); + return writer; + } +} +/** + * @generated MessageType for protobuf message parca.debuginfo.v1alpha1.MarkUploadFinishedResponse + */ +export const MarkUploadFinishedResponse = new MarkUploadFinishedResponse$Type(); // @generated message type with reflection information, may provide speed optimized methods class UploadRequest$Type extends MessageType { constructor() { @@ -357,11 +682,11 @@ class UploadInfo$Type extends MessageType { constructor() { super("parca.debuginfo.v1alpha1.UploadInfo", [ { no: 1, name: "build_id", kind: "scalar", T: 9 /*ScalarType.STRING*/ }, - { no: 2, name: "hash", kind: "scalar", T: 9 /*ScalarType.STRING*/ } + { no: 2, name: "upload_id", kind: "scalar", T: 9 /*ScalarType.STRING*/ } ]); } create(value?: PartialMessage): UploadInfo { - const message = { buildId: "", hash: "" }; + const message = { buildId: "", uploadId: "" }; globalThis.Object.defineProperty(message, MESSAGE_TYPE, { enumerable: false, value: this }); if (value !== undefined) reflectionMergePartial(this, message, value); @@ -375,8 +700,8 @@ class UploadInfo$Type extends MessageType { case /* string build_id */ 1: message.buildId = reader.string(); break; - case /* string hash */ 2: - message.hash = reader.string(); + case /* string upload_id */ 2: + message.uploadId = reader.string(); break; default: let u = options.readUnknownField; @@ -393,9 +718,9 @@ class UploadInfo$Type extends MessageType { /* string build_id = 1; */ if (message.buildId !== "") writer.tag(1, WireType.LengthDelimited).string(message.buildId); - /* string hash = 2; */ - if (message.hash !== "") - writer.tag(2, WireType.LengthDelimited).string(message.hash); + /* string upload_id = 2; */ + if (message.uploadId !== "") + writer.tag(2, WireType.LengthDelimited).string(message.uploadId); let u = options.writeUnknownFields; if (u !== false) (u == true ? 
UnknownFieldHandler.onWrite : u)(this.typeName, message, writer); @@ -461,20 +786,23 @@ class UploadResponse$Type extends MessageType { */ export const UploadResponse = new UploadResponse$Type(); // @generated message type with reflection information, may provide speed optimized methods -class DownloadRequest$Type extends MessageType { +class Debuginfo$Type extends MessageType { constructor() { - super("parca.debuginfo.v1alpha1.DownloadRequest", [ - { no: 1, name: "build_id", kind: "scalar", T: 9 /*ScalarType.STRING*/ } + super("parca.debuginfo.v1alpha1.Debuginfo", [ + { no: 1, name: "build_id", kind: "scalar", T: 9 /*ScalarType.STRING*/ }, + { no: 2, name: "source", kind: "enum", T: () => ["parca.debuginfo.v1alpha1.Debuginfo.Source", Debuginfo_Source, "SOURCE_"] }, + { no: 3, name: "upload", kind: "message", T: () => DebuginfoUpload }, + { no: 4, name: "quality", kind: "message", T: () => DebuginfoQuality } ]); } - create(value?: PartialMessage): DownloadRequest { - const message = { buildId: "" }; + create(value?: PartialMessage): Debuginfo { + const message = { buildId: "", source: 0 }; globalThis.Object.defineProperty(message, MESSAGE_TYPE, { enumerable: false, value: this }); if (value !== undefined) - reflectionMergePartial(this, message, value); + reflectionMergePartial(this, message, value); return message; } - internalBinaryRead(reader: IBinaryReader, length: number, options: BinaryReadOptions, target?: DownloadRequest): DownloadRequest { + internalBinaryRead(reader: IBinaryReader, length: number, options: BinaryReadOptions, target?: Debuginfo): Debuginfo { let message = target ?? 
this.create(), end = reader.pos + length; while (reader.pos < end) { let [fieldNo, wireType] = reader.tag(); @@ -482,6 +810,15 @@ class DownloadRequest$Type extends MessageType { case /* string build_id */ 1: message.buildId = reader.string(); break; + case /* parca.debuginfo.v1alpha1.Debuginfo.Source source */ 2: + message.source = reader.int32(); + break; + case /* parca.debuginfo.v1alpha1.DebuginfoUpload upload */ 3: + message.upload = DebuginfoUpload.internalBinaryRead(reader, reader.uint32(), options, message.upload); + break; + case /* parca.debuginfo.v1alpha1.DebuginfoQuality quality */ 4: + message.quality = DebuginfoQuality.internalBinaryRead(reader, reader.uint32(), options, message.quality); + break; default: let u = options.readUnknownField; if (u === "throw") @@ -493,10 +830,19 @@ class DownloadRequest$Type extends MessageType { } return message; } - internalBinaryWrite(message: DownloadRequest, writer: IBinaryWriter, options: BinaryWriteOptions): IBinaryWriter { + internalBinaryWrite(message: Debuginfo, writer: IBinaryWriter, options: BinaryWriteOptions): IBinaryWriter { /* string build_id = 1; */ if (message.buildId !== "") writer.tag(1, WireType.LengthDelimited).string(message.buildId); + /* parca.debuginfo.v1alpha1.Debuginfo.Source source = 2; */ + if (message.source !== 0) + writer.tag(2, WireType.Varint).int32(message.source); + /* parca.debuginfo.v1alpha1.DebuginfoUpload upload = 3; */ + if (message.upload) + DebuginfoUpload.internalBinaryWrite(message.upload, writer.tag(3, WireType.LengthDelimited).fork(), options).join(); + /* parca.debuginfo.v1alpha1.DebuginfoQuality quality = 4; */ + if (message.quality) + DebuginfoQuality.internalBinaryWrite(message.quality, writer.tag(4, WireType.LengthDelimited).fork(), options).join(); let u = options.writeUnknownFields; if (u !== false) (u == true ? 
UnknownFieldHandler.onWrite : u)(this.typeName, message, writer); @@ -504,40 +850,42 @@ class DownloadRequest$Type extends MessageType { } } /** - * @generated MessageType for protobuf message parca.debuginfo.v1alpha1.DownloadRequest + * @generated MessageType for protobuf message parca.debuginfo.v1alpha1.Debuginfo */ -export const DownloadRequest = new DownloadRequest$Type(); +export const Debuginfo = new Debuginfo$Type(); // @generated message type with reflection information, may provide speed optimized methods -class DownloadResponse$Type extends MessageType { +class DebuginfoUpload$Type extends MessageType { constructor() { - super("parca.debuginfo.v1alpha1.DownloadResponse", [ - { no: 1, name: "info", kind: "message", oneof: "data", T: () => DownloadInfo }, - { no: 2, name: "chunk_data", kind: "scalar", oneof: "data", T: 12 /*ScalarType.BYTES*/ } + super("parca.debuginfo.v1alpha1.DebuginfoUpload", [ + { no: 1, name: "id", kind: "scalar", T: 9 /*ScalarType.STRING*/ }, + { no: 2, name: "state", kind: "enum", T: () => ["parca.debuginfo.v1alpha1.DebuginfoUpload.State", DebuginfoUpload_State, "STATE_"] }, + { no: 4, name: "started_at", kind: "message", T: () => Timestamp }, + { no: 5, name: "finished_at", kind: "message", T: () => Timestamp } ]); } - create(value?: PartialMessage): DownloadResponse { - const message = { data: { oneofKind: undefined } }; + create(value?: PartialMessage): DebuginfoUpload { + const message = { id: "", state: 0 }; globalThis.Object.defineProperty(message, MESSAGE_TYPE, { enumerable: false, value: this }); if (value !== undefined) - reflectionMergePartial(this, message, value); + reflectionMergePartial(this, message, value); return message; } - internalBinaryRead(reader: IBinaryReader, length: number, options: BinaryReadOptions, target?: DownloadResponse): DownloadResponse { + internalBinaryRead(reader: IBinaryReader, length: number, options: BinaryReadOptions, target?: DebuginfoUpload): DebuginfoUpload { let message = target ?? 
this.create(), end = reader.pos + length; while (reader.pos < end) { let [fieldNo, wireType] = reader.tag(); switch (fieldNo) { - case /* parca.debuginfo.v1alpha1.DownloadInfo info */ 1: - message.data = { - oneofKind: "info", - info: DownloadInfo.internalBinaryRead(reader, reader.uint32(), options, (message.data as any).info) - }; + case /* string id */ 1: + message.id = reader.string(); break; - case /* bytes chunk_data */ 2: - message.data = { - oneofKind: "chunkData", - chunkData: reader.bytes() - }; + case /* parca.debuginfo.v1alpha1.DebuginfoUpload.State state */ 2: + message.state = reader.int32(); + break; + case /* google.protobuf.Timestamp started_at */ 4: + message.startedAt = Timestamp.internalBinaryRead(reader, reader.uint32(), options, message.startedAt); + break; + case /* google.protobuf.Timestamp finished_at */ 5: + message.finishedAt = Timestamp.internalBinaryRead(reader, reader.uint32(), options, message.finishedAt); break; default: let u = options.readUnknownField; @@ -550,13 +898,19 @@ class DownloadResponse$Type extends MessageType { } return message; } - internalBinaryWrite(message: DownloadResponse, writer: IBinaryWriter, options: BinaryWriteOptions): IBinaryWriter { - /* parca.debuginfo.v1alpha1.DownloadInfo info = 1; */ - if (message.data.oneofKind === "info") - DownloadInfo.internalBinaryWrite(message.data.info, writer.tag(1, WireType.LengthDelimited).fork(), options).join(); - /* bytes chunk_data = 2; */ - if (message.data.oneofKind === "chunkData") - writer.tag(2, WireType.LengthDelimited).bytes(message.data.chunkData); + internalBinaryWrite(message: DebuginfoUpload, writer: IBinaryWriter, options: BinaryWriteOptions): IBinaryWriter { + /* string id = 1; */ + if (message.id !== "") + writer.tag(1, WireType.LengthDelimited).string(message.id); + /* parca.debuginfo.v1alpha1.DebuginfoUpload.State state = 2; */ + if (message.state !== 0) + writer.tag(2, WireType.Varint).int32(message.state); + /* google.protobuf.Timestamp started_at = 4; */ 
+ if (message.startedAt) + Timestamp.internalBinaryWrite(message.startedAt, writer.tag(4, WireType.LengthDelimited).fork(), options).join(); + /* google.protobuf.Timestamp finished_at = 5; */ + if (message.finishedAt) + Timestamp.internalBinaryWrite(message.finishedAt, writer.tag(5, WireType.LengthDelimited).fork(), options).join(); let u = options.writeUnknownFields; if (u !== false) (u == true ? UnknownFieldHandler.onWrite : u)(this.typeName, message, writer); @@ -564,30 +918,30 @@ class DownloadResponse$Type extends MessageType { } } /** - * @generated MessageType for protobuf message parca.debuginfo.v1alpha1.DownloadResponse + * @generated MessageType for protobuf message parca.debuginfo.v1alpha1.DebuginfoUpload */ -export const DownloadResponse = new DownloadResponse$Type(); +export const DebuginfoUpload = new DebuginfoUpload$Type(); // @generated message type with reflection information, may provide speed optimized methods -class DownloadInfo$Type extends MessageType { +class DebuginfoQuality$Type extends MessageType { constructor() { - super("parca.debuginfo.v1alpha1.DownloadInfo", [ - { no: 1, name: "source", kind: "enum", T: () => ["parca.debuginfo.v1alpha1.DownloadInfo.Source", DownloadInfo_Source, "SOURCE_"] } + super("parca.debuginfo.v1alpha1.DebuginfoQuality", [ + { no: 1, name: "not_valid_elf", kind: "scalar", T: 8 /*ScalarType.BOOL*/ } ]); } - create(value?: PartialMessage): DownloadInfo { - const message = { source: 0 }; + create(value?: PartialMessage): DebuginfoQuality { + const message = { notValidElf: false }; globalThis.Object.defineProperty(message, MESSAGE_TYPE, { enumerable: false, value: this }); if (value !== undefined) - reflectionMergePartial(this, message, value); + reflectionMergePartial(this, message, value); return message; } - internalBinaryRead(reader: IBinaryReader, length: number, options: BinaryReadOptions, target?: DownloadInfo): DownloadInfo { + internalBinaryRead(reader: IBinaryReader, length: number, options: 
BinaryReadOptions, target?: DebuginfoQuality): DebuginfoQuality { let message = target ?? this.create(), end = reader.pos + length; while (reader.pos < end) { let [fieldNo, wireType] = reader.tag(); switch (fieldNo) { - case /* parca.debuginfo.v1alpha1.DownloadInfo.Source source */ 1: - message.source = reader.int32(); + case /* bool not_valid_elf */ 1: + message.notValidElf = reader.bool(); break; default: let u = options.readUnknownField; @@ -600,10 +954,10 @@ class DownloadInfo$Type extends MessageType { } return message; } - internalBinaryWrite(message: DownloadInfo, writer: IBinaryWriter, options: BinaryWriteOptions): IBinaryWriter { - /* parca.debuginfo.v1alpha1.DownloadInfo.Source source = 1; */ - if (message.source !== 0) - writer.tag(1, WireType.Varint).int32(message.source); + internalBinaryWrite(message: DebuginfoQuality, writer: IBinaryWriter, options: BinaryWriteOptions): IBinaryWriter { + /* bool not_valid_elf = 1; */ + if (message.notValidElf !== false) + writer.tag(1, WireType.Varint).bool(message.notValidElf); let u = options.writeUnknownFields; if (u !== false) (u == true ? 
UnknownFieldHandler.onWrite : u)(this.typeName, message, writer); @@ -611,14 +965,15 @@ class DownloadInfo$Type extends MessageType { } } /** - * @generated MessageType for protobuf message parca.debuginfo.v1alpha1.DownloadInfo + * @generated MessageType for protobuf message parca.debuginfo.v1alpha1.DebuginfoQuality */ -export const DownloadInfo = new DownloadInfo$Type(); +export const DebuginfoQuality = new DebuginfoQuality$Type(); /** - * @generated ServiceType for protobuf service parca.debuginfo.v1alpha1.DebugInfoService + * @generated ServiceType for protobuf service parca.debuginfo.v1alpha1.DebuginfoService */ -export const DebugInfoService = new ServiceType("parca.debuginfo.v1alpha1.DebugInfoService", [ - { name: "Exists", options: {}, I: ExistsRequest, O: ExistsResponse }, +export const DebuginfoService = new ServiceType("parca.debuginfo.v1alpha1.DebuginfoService", [ { name: "Upload", clientStreaming: true, options: {}, I: UploadRequest, O: UploadResponse }, - { name: "Download", serverStreaming: true, options: {}, I: DownloadRequest, O: DownloadResponse } + { name: "ShouldInitiateUpload", options: {}, I: ShouldInitiateUploadRequest, O: ShouldInitiateUploadResponse }, + { name: "InitiateUpload", options: {}, I: InitiateUploadRequest, O: InitiateUploadResponse }, + { name: "MarkUploadFinished", options: {}, I: MarkUploadFinishedRequest, O: MarkUploadFinishedResponse } ]);