/
table_aws_backup_job.go
248 lines (227 loc) · 7.58 KB
/
table_aws_backup_job.go
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
248
package aws
import (
"context"
"github.com/aws/aws-sdk-go-v2/aws"
"github.com/aws/aws-sdk-go-v2/service/backup"
"github.com/aws/aws-sdk-go-v2/service/backup/types"
backupv1 "github.com/aws/aws-sdk-go/service/backup"
"github.com/turbot/steampipe-plugin-sdk/v5/grpc/proto"
"github.com/turbot/steampipe-plugin-sdk/v5/plugin"
"github.com/turbot/steampipe-plugin-sdk/v5/plugin/transform"
)
//// TABLE DEFINITION

// tableAwsBackupJob defines the aws_backup_job table: its schema, the key
// column for point lookups, and the hydrate functions used for list and get
// operations. Rows are produced per region via the region matrix.
func tableAwsBackupJob(_ context.Context) *plugin.Table {
	return &plugin.Table{
		Name:        "aws_backup_job",
		Description: "AWS Backup Job",
		Get: &plugin.GetConfig{
			// A single backup job can be fetched directly by its job_id.
			KeyColumns: plugin.SingleColumn("job_id"),
			Hydrate:    getAwsBackupJob,
			Tags:       map[string]string{"service": "backup", "action": "DescribeBackupJob"},
		},
		List: &plugin.ListConfig{
			Hydrate: listAwsBackupJobs,
			Tags:    map[string]string{"service": "backup", "action": "ListBackupJobs"},
		},
		// Run the table in every region where the Backup service is available.
		GetMatrixItemFunc: SupportedRegionMatrix(backupv1.EndpointsID),
		Columns: awsRegionalColumns([]*plugin.Column{
			{
				Name:        "job_id",
				Description: "The logical id of a backup job.",
				Type:        proto.ColumnType_STRING,
				// API field is BackupJobId; exposed here under the shorter name.
				Transform: transform.FromField("BackupJobId"),
			},
			{
				Name:        "recovery_point_arn",
				Description: "An Amazon Resource Name (ARN) that uniquely identifies a recovery point.",
				Type:        proto.ColumnType_STRING,
				Transform:   transform.FromField("RecoveryPointArn"),
			},
			{
				Name:        "backup_vault_arn",
				Description: "An Amazon Resource Name (ARN) that uniquely identifies the target backup vault.",
				Type:        proto.ColumnType_STRING,
				Transform:   transform.FromField("BackupVaultArn"),
			},
			{
				Name:        "resource_type",
				Description: "The type of AWS resource to be backed up.",
				Type:        proto.ColumnType_STRING,
				Transform:   transform.FromField("ResourceType"),
			},
			{
				Name:        "resource_arn",
				Description: "An Amazon Resource Name (ARN) that uniquely identifies the source resource in the recovery point.",
				Type:        proto.ColumnType_STRING,
				Transform:   transform.FromField("ResourceArn"),
			},
			{
				Name:        "status",
				Description: "The current state of a backup job.",
				Type:        proto.ColumnType_STRING,
				// API field is State; exposed as "status" for consistency
				// with other tables in the plugin.
				Transform: transform.FromField("State"),
			},
			{
				Name:        "status_message",
				Description: "A detailed message explaining the status of the job.",
				Type:        proto.ColumnType_STRING,
				Transform:   transform.FromField("StatusMessage"),
			},
			{
				Name:        "backup_size",
				Description: "The size in bytes of a backup.",
				Type:        proto.ColumnType_INT,
				Transform:   transform.FromField("BackupSizeInBytes"),
			},
			{
				Name:        "backup_vault_name",
				Description: "The name of the target backup vault.",
				Type:        proto.ColumnType_STRING,
				Transform:   transform.FromField("BackupVaultName"),
			},
			{
				Name:        "backup_options",
				Description: "Specifies the backup options for a selected resource.",
				Type:        proto.ColumnType_JSON,
				Transform:   transform.FromField("BackupOptions"),
			},
			{
				Name:        "backup_type",
				Description: "Represents the type of backup for a backup job.",
				Type:        proto.ColumnType_STRING,
				Transform:   transform.FromField("BackupType"),
			},
			{
				Name:        "bytes_transferred",
				Description: "The size in bytes transferred to a backup vault at the time that the job status was queried.",
				Type:        proto.ColumnType_INT,
				Transform:   transform.FromField("BytesTransferred"),
			},
			{
				Name:        "completion_date",
				Description: "The date and time a backup job is completed.",
				Type:        proto.ColumnType_TIMESTAMP,
				Transform:   transform.FromField("CompletionDate"),
			},
			{
				Name:        "creation_date",
				Description: "The date and time a backup job is created.",
				Type:        proto.ColumnType_TIMESTAMP,
				Transform:   transform.FromField("CreationDate"),
			},
			{
				Name:        "expected_completion_date",
				Description: "The date and time a backup job is expected to be completed.",
				Type:        proto.ColumnType_TIMESTAMP,
				Transform:   transform.FromField("ExpectedCompletionDate"),
			},
			{
				Name:        "iam_role_arn",
				Description: "The ARN of the IAM role that AWS Backup uses to create the target recovery point.",
				Type:        proto.ColumnType_STRING,
				Transform:   transform.FromField("IamRoleArn"),
			},
			{
				Name:        "is_parent",
				Description: "A Boolean value that is returned as TRUE if the specified job is a parent job.",
				Type:        proto.ColumnType_BOOL,
				Transform:   transform.FromField("IsParentJob"),
			},
			{
				Name:        "parent_job_id",
				Description: "The ID of the parent backup job, if there is one.",
				Type:        proto.ColumnType_STRING,
				Transform:   transform.FromField("ParentJobId"),
			},
			{
				Name:        "percent_done",
				Description: "The percentage of job completion.",
				Type:        proto.ColumnType_STRING,
				Transform:   transform.FromField("PercentDone"),
			},
			{
				Name:        "start_by",
				Description: "The date and time a backup job must be started before it is canceled.",
				Type:        proto.ColumnType_TIMESTAMP,
				Transform:   transform.FromField("StartBy"),
			},

			// Steampipe standard columns
			{
				Name:        "title",
				Description: resourceInterfaceDescription("title"),
				Type:        proto.ColumnType_STRING,
				// Backup jobs have no name field, so the job id doubles as the title.
				Transform: transform.FromField("BackupJobId"),
			},
		}),
	}
}
// listAwsBackupJobs streams every backup job in the current region.
// It honors the query's LIMIT clause by shrinking the API page size, applies
// list rate limiting between pages, and stops early once enough rows have
// been streamed. Returns (nil, nil) for regions where Backup is unsupported.
func listAwsBackupJobs(ctx context.Context, d *plugin.QueryData, _ *plugin.HydrateData) (interface{}, error) {
	// Create session
	svc, err := BackupClient(ctx, d)
	if err != nil {
		plugin.Logger(ctx).Error("aws_backup_job.listAwsBackupJobs", "connection_error", err)
		return nil, err
	}
	if svc == nil {
		// Unsupported region, return no data
		return nil, nil
	}

	// Limit the page size to the smaller of the API maximum (1000) and the
	// query's LIMIT. Compare in int64 BEFORE converting: casting the int64
	// limit straight to int32 overflows for limits > math.MaxInt32 and would
	// send a negative MaxResults to the API.
	maxItems := int32(1000)
	if d.QueryContext.Limit != nil {
		if limit := *d.QueryContext.Limit; limit > 0 && limit < int64(maxItems) {
			maxItems = int32(limit)
		}
	}

	input := &backup.ListBackupJobsInput{
		MaxResults: aws.Int32(maxItems),
	}

	paginator := backup.NewListBackupJobsPaginator(svc, input, func(o *backup.ListBackupJobsPaginatorOptions) {
		o.Limit = maxItems
		o.StopOnDuplicateToken = true
	})

	// List call
	for paginator.HasMorePages() {
		// Apply rate limiting before fetching each page.
		d.WaitForListRateLimit(ctx)

		output, err := paginator.NextPage(ctx)
		if err != nil {
			plugin.Logger(ctx).Error("aws_backup_job.listAwsBackupJobs", "api_error", err)
			return nil, err
		}

		for _, item := range output.BackupJobs {
			d.StreamListItem(ctx, item)

			// Context can be cancelled due to manual cancellation or the limit has been hit
			if d.RowsRemaining(ctx) == 0 {
				return nil, nil
			}
		}
	}

	return nil, nil
}
// getAwsBackupJob fetches a single backup job via DescribeBackupJob. The job
// id comes either from the parent list row (h.Item, when hydrating) or from
// the job_id key qual (direct get). Returns (nil, nil) for unsupported
// regions or when no job id is available.
func getAwsBackupJob(ctx context.Context, d *plugin.QueryData, h *plugin.HydrateData) (interface{}, error) {
	// Create session
	svc, err := BackupClient(ctx, d)
	if err != nil {
		plugin.Logger(ctx).Error("aws_backup_job.getAwsBackupJob", "connection_error", err)
		return nil, err
	}
	if svc == nil {
		// Unsupported region, return no data
		return nil, nil
	}

	var backupJobID string
	if h.Item != nil {
		backupJob := h.Item.(types.BackupJob)
		backupJobID = *backupJob.BackupJobId
	} else {
		backupJobID = d.EqualsQualString("job_id")
	}

	// Empty check: avoid calling the API with an empty id (e.g. a NULL or
	// empty-string job_id qual), which would only produce an API error.
	if backupJobID == "" {
		return nil, nil
	}

	params := &backup.DescribeBackupJobInput{
		BackupJobId: aws.String(backupJobID),
	}

	op, err := svc.DescribeBackupJob(ctx, params)
	if err != nil {
		plugin.Logger(ctx).Error("aws_backup_job.getAwsBackupJob", "api_error", err)
		return nil, err
	}

	return op, nil
}