-
-
Notifications
You must be signed in to change notification settings - Fork 28
/
S3Repository.cs
202 lines (167 loc) · 8.07 KB
/
S3Repository.cs
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
using System.Net;
using System.Text;
using Amazon;
using Amazon.S3;
using Amazon.S3.Model;
using Microsoft.Extensions.Logging;
using Velopack.Packaging;
namespace Velopack.Deployment;
/// <summary>
/// Options required to locate and authenticate against an S3 (or S3-compatible) bucket
/// for download operations. Either <see cref="Endpoint"/> or <see cref="Region"/> must be set.
/// </summary>
public class S3DownloadOptions : RepositoryOptions
{
/// <summary> The access key id passed to the S3 client credentials. </summary>
public string KeyId { get; set; }
/// <summary> The secret access key passed to the S3 client credentials. </summary>
public string Secret { get; set; }
/// <summary> Optional session token; when set, temporary-session credentials are used. </summary>
public string Session { get; set; }
/// <summary> AWS region system name (eg. "us-east-1"); used when <see cref="Endpoint"/> is not set. </summary>
public string Region { get; set; }
/// <summary> Custom service URL for S3-compatible providers; takes precedence over <see cref="Region"/>. </summary>
public string Endpoint { get; set; }
/// <summary> The bucket name used for all object operations. </summary>
public string Bucket { get; set; }
}
/// <summary>
/// Options for uploading releases to an S3 bucket, extending the download options
/// with a retention policy.
/// </summary>
public class S3UploadOptions : S3DownloadOptions
{
/// <summary>
/// The number of full releases to retain; older full releases (and their deltas)
/// are deleted after upload. A value of 0 or less disables retention.
/// </summary>
public int KeepMaxReleases { get; set; }
}
/// <summary>
/// A release repository backed by Amazon S3 (or any S3-compatible object store),
/// supporting both downloading existing releases and uploading new ones.
/// </summary>
public class S3Repository : DownRepository<S3DownloadOptions>, IRepositoryCanUpload<S3UploadOptions>
{
    public S3Repository(ILogger logger) : base(logger)
    {
    }

    /// <summary>
    /// Uploads any local assets missing from the remote bucket, merges the local and remote
    /// release feeds, rewrites both release index files, and finally applies the
    /// <see cref="S3UploadOptions.KeepMaxReleases"/> retention policy.
    /// </summary>
    public async Task UploadMissingAssetsAsync(S3UploadOptions options)
    {
        var build = BuildAssets.Read(options.ReleaseDir.FullName, options.Channel);
        var client = GetS3Client(options);

        Log.Info($"Preparing to upload {build.Files.Count} local assets to S3 endpoint {options.Endpoint ?? ""}");

        var remoteReleases = await GetReleasesAsync(options);
        Log.Info($"There are {remoteReleases.Assets.Length} assets in remote RELEASES file.");

        var localEntries = build.GetReleaseEntries();
        var releaseEntries = ReleaseEntryHelper.MergeAssets(localEntries, remoteReleases.Assets).ToArray();
        Log.Info($"{releaseEntries.Length} merged local/remote releases.");

        VelopackAsset[] toDelete = Array.Empty<VelopackAsset>();

        if (options.KeepMaxReleases > 0) {
            // Keep the newest KeepMaxReleases full releases; everything strictly older
            // than the oldest kept full release (including deltas) is retired.
            var fullReleases = releaseEntries
                .OrderByDescending(x => x.Version)
                .Where(x => x.Type == VelopackAssetType.Full)
                .ToArray();

            if (fullReleases.Length > options.KeepMaxReleases) {
                var minVersion = fullReleases[options.KeepMaxReleases - 1].Version;
                toDelete = releaseEntries
                    .Where(x => x.Version < minVersion)
                    .ToArray();
                releaseEntries = releaseEntries.Except(toDelete).ToArray();
                Log.Info($"Retention policy (keepMaxReleases={options.KeepMaxReleases}) will delete {toDelete.Length} releases.");
            } else {
                Log.Info($"Retention policy (keepMaxReleases={options.KeepMaxReleases}) will not be applied, because there will only be {fullReleases.Length} full releases when this upload has completed.");
            }
        }

        foreach (var asset in build.Files) {
            await UploadFile(client, options.Bucket, Path.GetFileName(asset), new FileInfo(asset), true);
        }

        // Upload the modern JSON release index.
        using var _1 = Utility.GetTempFileName(out var tmpReleases);
        File.WriteAllText(tmpReleases, ReleaseEntryHelper.GetAssetFeedJson(new VelopackAssetFeed { Assets = releaseEntries }));
        var releasesName = Utility.GetVeloReleaseIndexName(options.Channel);
        await UploadFile(client, options.Bucket, releasesName, new FileInfo(tmpReleases), true);

        // Also upload the legacy RELEASES file for older clients.
#pragma warning disable CS0612 // Type or member is obsolete
#pragma warning disable CS0618 // Type or member is obsolete
        var legacyKey = Utility.GetReleasesFileName(options.Channel);
        using var _2 = Utility.GetTempFileName(out var tmpReleases2);
        using (var fs = File.Create(tmpReleases2)) {
            ReleaseEntry.WriteReleaseFile(releaseEntries.Select(ReleaseEntry.FromVelopackAsset), fs);
        }
        await UploadFile(client, options.Bucket, legacyKey, new FileInfo(tmpReleases2), true);
#pragma warning restore CS0618 // Type or member is obsolete
#pragma warning restore CS0612 // Type or member is obsolete

        // Delete retired objects only after the new index files are uploaded, so a
        // published index never references objects that are already gone.
        if (toDelete.Length > 0) {
            Log.Info($"Retention policy about to delete {toDelete.Length} releases...");
            foreach (var del in toDelete) {
                await RetryAsync(() => client.DeleteObjectAsync(options.Bucket, del.FileName), "Deleting " + del.FileName);
            }
        }

        Log.Info("Done.");
    }

    /// <summary>
    /// Downloads and parses the channel's release index from the bucket.
    /// Returns an empty feed when the index does not exist yet (eg. the first upload).
    /// </summary>
    protected override async Task<VelopackAssetFeed> GetReleasesAsync(S3DownloadOptions options)
    {
        var releasesName = Utility.GetVeloReleaseIndexName(options.Channel);
        var client = GetS3Client(options);
        var ms = new MemoryStream();
        try {
            await RetryAsync(async () => {
                // Reset the buffer on every attempt. Without this, a partially
                // completed failed attempt would leave stale bytes in the stream and
                // the retry would append after them, corrupting the JSON payload.
                ms.SetLength(0);
                using (var obj = await client.GetObjectAsync(options.Bucket, releasesName))
                using (var stream = obj.ResponseStream) {
                    await stream.CopyToAsync(ms);
                }
            }, $"Fetching {releasesName}...");
        } catch (AmazonS3Exception ex) when (ex.StatusCode == HttpStatusCode.NotFound) {
            // No index in the bucket yet - treat as an empty feed rather than an error.
            return new VelopackAssetFeed();
        }
        return VelopackAssetFeed.FromJson(Encoding.UTF8.GetString(ms.ToArray()));
    }

    /// <summary>
    /// Downloads a single release asset from the bucket to <paramref name="filePath"/>.
    /// </summary>
    protected override async Task SaveEntryToFileAsync(S3DownloadOptions options, VelopackAsset entry, string filePath)
    {
        var client = GetS3Client(options);
        await RetryAsync(async () => {
            using (var obj = await client.GetObjectAsync(options.Bucket, entry.FileName)) {
                await obj.WriteResponseStreamToFileAsync(filePath, false, CancellationToken.None);
            }
        }, $"Downloading {entry.FileName}...");
    }

    /// <summary>
    /// Creates an S3 client from the supplied options. A custom <see cref="S3DownloadOptions.Endpoint"/>
    /// takes precedence; otherwise <see cref="S3DownloadOptions.Region"/> is used. One of the
    /// two must be provided.
    /// </summary>
    /// <exception cref="InvalidOperationException">Neither Endpoint nor Region was set.</exception>
    private static AmazonS3Client GetS3Client(S3DownloadOptions options)
    {
        var config = new AmazonS3Config();
        if (options.Endpoint != null) {
            config.ServiceURL = options.Endpoint;
        } else if (options.Region != null) {
            config.RegionEndpoint = RegionEndpoint.GetBySystemName(options.Region);
        } else {
            throw new InvalidOperationException("S3 configuration must specify either an Endpoint or a Region.");
        }
        if (options.Session != null) {
            return new AmazonS3Client(options.KeyId, options.Secret, options.Session, config);
        } else {
            return new AmazonS3Client(options.KeyId, options.Secret, config);
        }
    }

    /// <summary>
    /// Uploads a single file, skipping the upload when the remote ETag matches the local
    /// MD5 checksum. Note that S3 ETags only equal the content MD5 for non-multipart
    /// uploads, so a mismatch may simply cause a harmless re-upload.
    /// </summary>
    private async Task UploadFile(AmazonS3Client client, string bucket, string key, FileInfo f, bool overwriteRemote)
    {
        string deleteOldVersionId = null;

        // try to detect an existing remote file of the same name
        try {
            var metadata = await client.GetObjectMetadataAsync(bucket, key);
            var md5 = GetFileMD5Checksum(f.FullName);
            var stored = metadata?.ETag?.Trim().Trim('"');
            if (stored != null) {
                if (stored.Equals(md5, StringComparison.InvariantCultureIgnoreCase)) {
                    Log.Info($"Upload file '{key}' skipped (already exists in remote)");
                    return;
                } else if (overwriteRemote) {
                    Log.Info($"File '{key}' exists in remote, replacing...");
                    deleteOldVersionId = metadata.VersionId;
                } else {
                    Log.Warn($"File '{key}' exists in remote and checksum does not match local file. Use 'overwrite' argument to replace remote file.");
                    return;
                }
            }
        } catch {
            // don't care if this check fails. worst case, we end up re-uploading a file that
            // already exists. storage providers should prefer the newer file of the same name.
        }

        var req = new PutObjectRequest {
            BucketName = bucket,
            FilePath = f.FullName,
            Key = key,
        };

        await RetryAsync(() => client.PutObjectAsync(req), "Uploading " + key);

        if (deleteOldVersionId != null) {
            try {
                await RetryAsync(() => client.DeleteObjectAsync(bucket, key, deleteOldVersionId),
                    "Removing old version of " + key);
            } catch {
                // best-effort: failing to prune the superseded object version is not fatal.
            }
        }
    }

    /// <summary>
    /// Computes the uppercase hex MD5 checksum of a file, for comparison against S3 ETags.
    /// </summary>
    private static string GetFileMD5Checksum(string filePath)
    {
        // dispose the hash algorithm and file stream deterministically
        using var md5 = System.Security.Cryptography.MD5.Create();
        using var fs = File.OpenRead(filePath);
        var checksum = md5.ComputeHash(fs);
        return BitConverter.ToString(checksum).Replace("-", String.Empty);
    }
}