// Code generated by protoc-gen-go. DO NOT EDIT.
// source: flyteidl/plugins/pytorch.proto

package plugins

import (
	fmt "fmt"
	proto "github.com/golang/protobuf/proto"
	math "math"
)

// Reference imports to suppress errors if they are not otherwise used.
var _ = proto.Marshal
var _ = fmt.Errorf
var _ = math.Inf

// This is a compile-time assertion to ensure that this generated file
// is compatible with the proto package it is being compiled against.
// A compilation error at this line likely means your copy of the
// proto package needs to be updated.
const _ = proto.ProtoPackageIsVersion3 // please upgrade the proto package

// Custom proto for plugin that enables distributed training using https://github.com/kubeflow/pytorch-operator
type DistributedPyTorchTrainingTask struct {
	// number of worker replicas spawned in the cluster for this job
	Workers              int32    `protobuf:"varint,1,opt,name=workers,proto3" json:"workers,omitempty"`
	XXX_NoUnkeyedLiteral struct{} `json:"-"`
	XXX_unrecognized     []byte   `json:"-"`
	XXX_sizecache        int32    `json:"-"`
}

func (m *DistributedPyTorchTrainingTask) Reset()         { *m = DistributedPyTorchTrainingTask{} }
func (m *DistributedPyTorchTrainingTask) String() string { return proto.CompactTextString(m) }
func (*DistributedPyTorchTrainingTask) ProtoMessage()    {}
func (*DistributedPyTorchTrainingTask) Descriptor() ([]byte, []int) {
	return fileDescriptor_4df8a9374b28b766, []int{0}
}

func (m *DistributedPyTorchTrainingTask) XXX_Unmarshal(b []byte) error {
	return xxx_messageInfo_DistributedPyTorchTrainingTask.Unmarshal(m, b)
}
func (m *DistributedPyTorchTrainingTask) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
	return xxx_messageInfo_DistributedPyTorchTrainingTask.Marshal(b, m, deterministic)
}
func (m *DistributedPyTorchTrainingTask) XXX_Merge(src proto.Message) {
	xxx_messageInfo_DistributedPyTorchTrainingTask.Merge(m, src)
}
func (m *DistributedPyTorchTrainingTask) XXX_Size() int {
	return xxx_messageInfo_DistributedPyTorchTrainingTask.Size(m)
}
func (m *DistributedPyTorchTrainingTask) XXX_DiscardUnknown() {
	xxx_messageInfo_DistributedPyTorchTrainingTask.DiscardUnknown(m)
}

var xxx_messageInfo_DistributedPyTorchTrainingTask proto.InternalMessageInfo

func (m *DistributedPyTorchTrainingTask) GetWorkers() int32 {
	if m != nil {
		return m.Workers
	}
	return 0
}

func init() {
	proto.RegisterType((*DistributedPyTorchTrainingTask)(nil), "flyteidl.plugins.DistributedPyTorchTrainingTask")
}

func init() { proto.RegisterFile("flyteidl/plugins/pytorch.proto", fileDescriptor_4df8a9374b28b766) }

var fileDescriptor_4df8a9374b28b766 = []byte{
	// 156 bytes of a gzipped FileDescriptorProto
	0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0x92, 0x4b, 0xcb, 0xa9, 0x2c,
	0x49, 0xcd, 0x4c, 0xc9, 0xd1, 0x2f, 0xc8, 0x29, 0x4d, 0xcf, 0xcc, 0x2b, 0xd6, 0x2f, 0xa8, 0x2c,
	0xc9, 0x2f, 0x4a, 0xce, 0xd0, 0x2b, 0x28, 0xca, 0x2f, 0xc9, 0x17, 0x12, 0x80, 0xc9, 0xeb, 0x41,
	0xe5, 0x95, 0xac, 0xb8, 0xe4, 0x5c, 0x32, 0x8b, 0x4b, 0x8a, 0x32, 0x93, 0x4a, 0x4b, 0x52, 0x53,
	0x02, 0x2a, 0x43, 0x40, 0xaa, 0x43, 0x8a, 0x12, 0x33, 0xf3, 0x32, 0xf3, 0xd2, 0x43, 0x12, 0x8b,
	0xb3, 0x85, 0x24, 0xb8, 0xd8, 0xcb, 0xf3, 0x8b, 0xb2, 0x53, 0x8b, 0x8a, 0x25, 0x18, 0x15, 0x18,
	0x35, 0x58, 0x83, 0x60, 0x5c, 0x27, 0xcb, 0x28, 0xf3, 0xf4, 0xcc, 0x92, 0x8c, 0xd2, 0x24, 0xbd,
	0xe4, 0xfc, 0x5c, 0x7d, 0xb0, 0xd1, 0xf9, 0x45, 0xe9, 0xfa, 0x70, 0x37, 0xa4, 0xa7, 0xe6, 0xe9,
	0x17, 0x24, 0xe9, 0xa6, 0xe7, 0xeb, 0xa3, 0x3b, 0x2b, 0x89, 0x0d, 0xec, 0x1e, 0x63, 0x40, 0x00,
	0x00, 0x00, 0x00, 0xff, 0xff, 0x91, 0x53, 0x3a, 0xa1, 0xb1, 0x00, 0x00, 0x00,
}
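
// For reference, the message above corresponds to roughly this definition in
// flyteidl/plugins/pytorch.proto, reconstructed from the struct tags and the
// RegisterType call (a sketch; the original file may differ in comments and
// ordering):
//
//	syntax = "proto3";
//	package flyteidl.plugins;
//
//	// Custom proto for plugin that enables distributed training using
//	// https://github.com/kubeflow/pytorch-operator
//	message DistributedPyTorchTrainingTask {
//		// number of worker replicas spawned in the cluster for this job
//		int32 workers = 1;
//	}

// The function below is an illustrative sketch and not part of the
// protoc-gen-go output: it shows how the generated message round-trips
// through the proto binary wire format using the
// github.com/golang/protobuf/proto API imported above. The function name and
// the worker count of 4 are arbitrary choices for the example.
func exampleDistributedPyTorchTrainingTaskRoundTrip() {
	// Build a task that requests four worker replicas.
	task := &DistributedPyTorchTrainingTask{Workers: 4}

	// Marshal to the binary wire format.
	data, err := proto.Marshal(task)
	if err != nil {
		panic(err)
	}

	// Unmarshal into a fresh message and read the field back through its
	// generated getter, which is safe to call even on a nil receiver.
	decoded := &DistributedPyTorchTrainingTask{}
	if err := proto.Unmarshal(data, decoded); err != nil {
		panic(err)
	}
	fmt.Println(decoded.GetWorkers()) // prints 4
}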