Skip to content

Commit e94399d

Browse files
Md. Emruz Hossain authored and tamalsaha committed
Use restic 0.9.5 (#789)
* Use restic 0.9.5 * Fixed backup failure handling
1 parent e962a1c commit e94399d

38 files changed

+1112
-578
lines changed

go.mod

Lines changed: 3 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -27,6 +27,9 @@ require (
2727
github.com/spf13/cobra v0.0.3
2828
github.com/spf13/pflag v1.0.3
2929
github.com/stretchr/testify v1.3.0
30+
golang.org/x/crypto v0.0.0-20190513172903-22d7a77e9e5f // indirect
31+
golang.org/x/net v0.0.0-20190514140710-3ec191127204 // indirect
32+
golang.org/x/sys v0.0.0-20190516110030-61b9204099cb // indirect
3033
gomodules.xyz/cert v1.0.0
3134
gomodules.xyz/envsubst v0.0.0-20190321051520-c745d52104af
3235
google.golang.org/genproto v0.0.0-20190502173448-54afdca5d873 // indirect

go.sum

Lines changed: 7 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -98,6 +98,7 @@ github.com/emicklei/go-restful v2.9.4+incompatible/go.mod h1:otzb+WCGbkyDHkqmQmT
9898
github.com/evanphx/json-patch v4.0.0+incompatible/go.mod h1:50XU6AFN0ol/bzJsmQLiYLvXMP4fmwYFNcr97nuDLSk=
9999
github.com/evanphx/json-patch v4.2.0+incompatible h1:fUDGZCv/7iAN7u0puUVhvKCcsR6vRfwrJatElLBEf0I=
100100
github.com/evanphx/json-patch v4.2.0+incompatible/go.mod h1:50XU6AFN0ol/bzJsmQLiYLvXMP4fmwYFNcr97nuDLSk=
101+
github.com/fatih/color v1.7.0 h1:DkWD4oS2D8LGGgTQ6IvwJJXSL5Vp2ffcQg58nFV38Ys=
101102
github.com/fatih/color v1.7.0/go.mod h1:Zm6kSWBoL9eyXnKyktHP6abPY2pDugNf5KwzbycvMj4=
102103
github.com/fatih/structs v1.1.0 h1:Q7juDM0QtcnhCpeyLGQKyg4TOIghuNXrkL32pHAUMxo=
103104
github.com/fatih/structs v1.1.0/go.mod h1:9NiDSp5zOcgEDl+j00MP/WkGVPOlPRLejGD8Ga6PJ7M=
@@ -353,6 +354,8 @@ golang.org/x/crypto v0.0.0-20190422183909-d864b10871cd h1:sMHc2rZHuzQmrbVoSpt9Hg
353354
golang.org/x/crypto v0.0.0-20190422183909-d864b10871cd/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
354355
golang.org/x/crypto v0.0.0-20190506204251-e1dfcc566284 h1:rlLehGeYg6jfoyz/eDqDU1iRXLKfR42nnNh57ytKEWo=
355356
golang.org/x/crypto v0.0.0-20190506204251-e1dfcc566284/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
357+
golang.org/x/crypto v0.0.0-20190513172903-22d7a77e9e5f h1:R423Cnkcp5JABoeemiGEPlt9tHXFfw5kvc0yqlxRPWo=
358+
golang.org/x/crypto v0.0.0-20190513172903-22d7a77e9e5f/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
356359
golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
357360
golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE=
358361
golang.org/x/lint v0.0.0-20190227174305-5b3e6a55c961/go.mod h1:wehouNa3lNwaWXcvxsM5YxQ5yQlVC4a0KAMCusXpPoU=
@@ -373,6 +376,8 @@ golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3 h1:0GoQqolDA55aaLxZyTzK/Y2eP
373376
golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
374377
golang.org/x/net v0.0.0-20190503192946-f4e77d36d62c h1:uOCk1iQW6Vc18bnC13MfzScl+wdKBmM9Y9kU7Z83/lw=
375378
golang.org/x/net v0.0.0-20190503192946-f4e77d36d62c/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
379+
golang.org/x/net v0.0.0-20190514140710-3ec191127204 h1:4yG6GqBtw9C+UrLp6s2wtSniayy/Vd/3F7ffLE427XI=
380+
golang.org/x/net v0.0.0-20190514140710-3ec191127204/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
376381
golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be h1:vEDujvNQGv4jgYKudGeI/+DAX4Jffq6hpD55MmoEvKs=
377382
golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U=
378383
golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
@@ -395,6 +400,8 @@ golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7w
395400
golang.org/x/sys v0.0.0-20190507160741-ecd444e8653b/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
396401
golang.org/x/sys v0.0.0-20190508100423-12bbe5a7a520 h1:5/ojcKo2vQ2eroPDFcBB9tuc4N42a5njs7UWP2jk3KU=
397402
golang.org/x/sys v0.0.0-20190508100423-12bbe5a7a520/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
403+
golang.org/x/sys v0.0.0-20190516110030-61b9204099cb h1:k07iPOt0d6nEnwXF+kHB+iEg+WSuKe/SOQuFM2QoD+E=
404+
golang.org/x/sys v0.0.0-20190516110030-61b9204099cb/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
398405
golang.org/x/text v0.3.0 h1:g61tztE5qeGQ89tm6NTjjM9VPIm088od1l6aSorWRWg=
399406
golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
400407
golang.org/x/text v0.3.1-0.20180807135948-17ff2d5776d2/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=

hack/docker/setup-es.sh

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -14,7 +14,7 @@ source "$REPO_ROOT/hack/libbuild/common/public_image.sh"
1414

1515
APPSCODE_ENV=${APPSCODE_ENV:-dev}
1616
IMG=stash
17-
RESTIC_VER=${RESTIC_VER:-0.9.4}
17+
RESTIC_VER=${RESTIC_VER:-0.9.5}
1818
RESTIC_BRANCH=${RESTIC_BRANCH:-stash-0.4.2}
1919

2020
DIST=$REPO_ROOT/dist

hack/docker/setup-mongo.sh

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -14,7 +14,7 @@ source "$REPO_ROOT/hack/libbuild/common/public_image.sh"
1414

1515
APPSCODE_ENV=${APPSCODE_ENV:-dev}
1616
IMG=stash
17-
NEW_RESTIC_VER=${NEW_RESTIC_VER:-0.9.4} # also update in restic wrapper library
17+
NEW_RESTIC_VER=${NEW_RESTIC_VER:-0.9.5} # also update in restic wrapper library
1818
RESTIC_BRANCH=${RESTIC_BRANCH:-stash-0.4.2}
1919

2020
DIST=$REPO_ROOT/dist

hack/docker/setup-mysql.sh

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -14,7 +14,7 @@ source "$REPO_ROOT/hack/libbuild/common/public_image.sh"
1414

1515
APPSCODE_ENV=${APPSCODE_ENV:-dev}
1616
IMG=stash
17-
NEW_RESTIC_VER=${NEW_RESTIC_VER:-0.9.4} # also update in restic wrapper library
17+
NEW_RESTIC_VER=${NEW_RESTIC_VER:-0.9.5} # also update in restic wrapper library
1818
RESTIC_BRANCH=${RESTIC_BRANCH:-stash-0.4.2}
1919

2020
DIST=$REPO_ROOT/dist

hack/docker/setup-pg.sh

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -14,7 +14,7 @@ source "$REPO_ROOT/hack/libbuild/common/public_image.sh"
1414

1515
APPSCODE_ENV=${APPSCODE_ENV:-dev}
1616
IMG=stash
17-
NEW_RESTIC_VER=${NEW_RESTIC_VER:-0.9.4} # also update in restic wrapper library
17+
NEW_RESTIC_VER=${NEW_RESTIC_VER:-0.9.5} # also update in restic wrapper library
1818
RESTIC_BRANCH=${RESTIC_BRANCH:-stash-0.4.2}
1919

2020
DIST=$REPO_ROOT/dist

hack/docker/setup.sh

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -15,7 +15,7 @@ source "$REPO_ROOT/hack/libbuild/common/public_image.sh"
1515
APPSCODE_ENV=${APPSCODE_ENV:-dev}
1616
IMG=stash
1717
RESTIC_VER=${RESTIC_VER:-0.8.3}
18-
NEW_RESTIC_VER=${NEW_RESTIC_VER:-0.9.4} # also update in restic wrapper library
18+
NEW_RESTIC_VER=${NEW_RESTIC_VER:-0.9.5} # also update in restic wrapper library
1919
RESTIC_BRANCH=${RESTIC_BRANCH:-stash-0.4.2}
2020

2121
DIST=$REPO_ROOT/dist

pkg/backup/backupsession.go

Lines changed: 88 additions & 50 deletions
Original file line numberDiff line numberDiff line change
@@ -102,6 +102,10 @@ func (c *BackupSessionController) runBackupSessionController(backupConfiguration
102102
}
103103
c.bsQueue.Run(stopCh)
104104

105+
// controller has started successfully. send successful backup setup metrics
106+
log.Infoln("Started BackupSession controller........")
107+
c.handleBackupSetupSuccess(backupConfiguration)
108+
105109
// wait until stop signal is sent.
106110
<-stopCh
107111
return nil
@@ -149,44 +153,54 @@ func (c *BackupSessionController) processBackupSession(key string) error {
149153
backupSession := obj.(*api_v1beta1.BackupSession)
150154
glog.Infof("Sync/Add/Update for Backup Session %s", backupSession.GetName())
151155

152-
// get respective BackupConfiguration for BackupSession
153-
backupConfiguration, err := c.StashClient.StashV1beta1().BackupConfigurations(backupSession.Namespace).Get(
154-
backupSession.Spec.BackupConfiguration.Name,
155-
metav1.GetOptions{},
156-
)
156+
err := c.startBackupProcess(backupSession)
157157
if err != nil {
158-
return fmt.Errorf("can't get BackupConfiguration for BackupSession %s/%s, reason: %s", backupSession.Namespace, backupSession.Name, err)
158+
e2 := c.handleBackupFailure(backupSession, err)
159+
err = errors.NewAggregate([]error{err, e2})
160+
// log failure. don't fail the container as it may interrupt user's service
161+
log.Infoln("failed to complete backup. Reason: ", err.Error())
159162
}
163+
}
164+
return nil
165+
}
160166

161-
// skip if BackupConfiguration paused
162-
if backupConfiguration.Spec.Paused {
163-
log.Infof("Skipping processing BackupSession %s/%s. Reason: Backup Configuration is paused.", backupSession.Namespace, backupSession.Name)
164-
return nil
165-
}
167+
func (c *BackupSessionController) startBackupProcess(backupSession *api_v1beta1.BackupSession) error {
168+
// get respective BackupConfiguration for BackupSession
169+
backupConfiguration, err := c.StashClient.StashV1beta1().BackupConfigurations(backupSession.Namespace).Get(
170+
backupSession.Spec.BackupConfiguration.Name,
171+
metav1.GetOptions{},
172+
)
173+
if err != nil {
174+
return fmt.Errorf("can't get BackupConfiguration for BackupSession %s/%s, reason: %s", backupSession.Namespace, backupSession.Name, err)
175+
}
166176

167-
host, err := util.GetHostName(backupConfiguration.Spec.Target)
168-
if err != nil {
169-
return err
170-
}
177+
// skip if BackupConfiguration paused
178+
if backupConfiguration.Spec.Paused {
179+
log.Infof("Skipping processing BackupSession %s/%s. Reason: Backup Configuration is paused.", backupSession.Namespace, backupSession.Name)
180+
return nil
181+
}
171182

172-
// if BackupSession already has been processed for this host then skip further processing
173-
if c.isBackupTakenForThisHost(backupSession, host) {
174-
log.Infof("Skip processing BackupSession %s/%s. Reason: BackupSession has been processed already for host %q\n", backupSession.Namespace, backupSession.Name, host)
175-
return nil
176-
}
183+
host, err := util.GetHostName(backupConfiguration.Spec.Target)
184+
if err != nil {
185+
return err
186+
}
177187

178-
// For Deployment, ReplicaSet and ReplicationController only leader pod is running this controller so no problem with restic repo lock.
179-
// For StatefulSet and DaemonSet all pods are running this controller and all will try to backup simultaneously. But, restic repository can be
180-
// locked by only one pod. So, we need a leader election to determine who will take backup first. Once backup is complete, the leader pod will
181-
// step down from leadership so that another replica can acquire leadership and start taking backup.
182-
switch backupConfiguration.Spec.Target.Ref.Kind {
183-
case apis.KindDeployment, apis.KindReplicaSet, apis.KindReplicationController, apis.KindDeploymentConfig:
184-
return c.backup(backupSession, backupConfiguration)
185-
default:
186-
return c.electBackupLeader(backupSession, backupConfiguration)
187-
}
188+
// if BackupSession already has been processed for this host then skip further processing
189+
if c.isBackupTakenForThisHost(backupSession, host) {
190+
log.Infof("Skip processing BackupSession %s/%s. Reason: BackupSession has been processed already for host %q\n", backupSession.Namespace, backupSession.Name, host)
191+
return nil
192+
}
193+
194+
// For Deployment, ReplicaSet and ReplicationController only leader pod is running this controller so no problem with restic repo lock.
195+
// For StatefulSet and DaemonSet all pods are running this controller and all will try to backup simultaneously. But, restic repository can be
196+
// locked by only one pod. So, we need a leader election to determine who will take backup first. Once backup is complete, the leader pod will
197+
// step down from leadership so that another replica can acquire leadership and start taking backup.
198+
switch backupConfiguration.Spec.Target.Ref.Kind {
199+
case apis.KindDeployment, apis.KindReplicaSet, apis.KindReplicationController, apis.KindDeploymentConfig:
200+
return c.backup(backupSession, backupConfiguration)
201+
default:
202+
return c.electBackupLeader(backupSession, backupConfiguration)
188203
}
189-
return nil
190204
}
191205

192206
func (c *BackupSessionController) backup(backupSession *api_v1beta1.BackupSession, backupConfiguration *api_v1beta1.BackupConfiguration) error {
@@ -295,18 +309,14 @@ func (c *BackupSessionController) electLeaderPod(backupConfiguration *api_v1beta
295309
RetryPeriod: 2 * time.Second,
296310
Callbacks: leaderelection.LeaderCallbacks{
297311
OnStartedLeading: func(ctx context.Context) {
298-
log.Infoln("Got leadership, preparing starting BackupSession controller")
312+
log.Infoln("Got leadership, starting BackupSession controller")
299313
// this pod is now leader. run BackupSession controller.
300314
err := c.runBackupSessionController(backupConfiguration, stopCh)
301315
if err != nil {
302-
e2 := c.HandleBackupFailure(err)
303-
if e2 != nil {
304-
err = errors.NewAggregate([]error{err, e2})
305-
}
316+
// send failure metric and fail the container so that it retry to setup
317+
c.HandleBackupSetupFailure(err)
306318
// step down from leadership so that other replicas can try to start BackupSession controller
307319
cancel()
308-
// fail the container so that it restart and re-try this process.
309-
log.Fatalln("failed to start BackupSession controller. Reason: ", err.Error())
310320
}
311321
},
312322
OnStoppedLeading: func() {
@@ -354,14 +364,12 @@ func (c *BackupSessionController) electBackupLeader(backupSession *api_v1beta1.B
354364
// run backup process
355365
err := c.backup(backupSession, backupConfiguration)
356366
if err != nil {
357-
e2 := c.HandleBackupFailure(err)
358-
if e2 != nil {
359-
err = errors.NewAggregate([]error{err, e2})
360-
}
367+
// send failure metrics and update BackupSession status
368+
err = c.handleBackupFailure(backupSession, err)
361369
// step down from leadership so that other replicas can start backup
362370
cancel()
363-
// fail the container so that it restart and re-try to backup
364-
log.Fatalln("failed to complete backup. Reason: ", err.Error())
371+
// log failure. don't fail the container as it may interrupt user's service
372+
log.Warningln("failed to complete backup. Reason: ", err.Error())
365373
}
366374
// backup process is complete. now, step down from leadership so that other replicas can start
367375
cancel()
@@ -374,12 +382,7 @@ func (c *BackupSessionController) electBackupLeader(backupSession *api_v1beta1.B
374382
return nil
375383
}
376384

377-
func (c *BackupSessionController) HandleBackupFailure(backupErr error) error {
378-
backupSession, err := c.StashClient.StashV1beta1().BackupSessions(c.Namespace).Get(c.BackupConfigurationName, metav1.GetOptions{})
379-
if err != nil {
380-
return err
381-
}
382-
385+
func (c *BackupSessionController) handleBackupFailure(backupSession *api_v1beta1.BackupSession, backupErr error) error {
383386
backupConfiguration, err := c.StashClient.StashV1beta1().BackupConfigurations(backupSession.Namespace).Get(backupSession.Spec.BackupConfiguration.Name, metav1.GetOptions{})
384387
if err != nil {
385388
return err
@@ -412,6 +415,41 @@ func (c *BackupSessionController) HandleBackupFailure(backupErr error) error {
412415
return nil
413416
}
414417

418+
func (c *BackupSessionController) HandleBackupSetupFailure(setupErr error) {
419+
backupConfiguration, err := c.StashClient.StashV1beta1().BackupConfigurations(c.Namespace).Get(c.BackupConfigurationName, metav1.GetOptions{})
420+
if err != nil {
421+
e2 := errors.NewAggregate([]error{setupErr, err})
422+
log.Fatalln("failed to setup backup process. Reason: ", e2.Error())
423+
}
424+
c.Metrics.Labels = append(c.Metrics.Labels, fmt.Sprintf("BackupConfiguration=%s", backupConfiguration.Name))
425+
if backupConfiguration.Spec.Target != nil {
426+
c.Metrics.Labels = append(c.Metrics.Labels, fmt.Sprintf("kind=%s", backupConfiguration.Spec.Target.Ref.Kind))
427+
c.Metrics.Labels = append(c.Metrics.Labels, fmt.Sprintf("name=%s", backupConfiguration.Spec.Target.Ref.Name))
428+
}
429+
// send prometheus metrics
430+
if c.Metrics.Enabled {
431+
err := restic.HandleBackupSetupMetrics(c.Metrics, setupErr)
432+
setupErr = errors.NewAggregate([]error{setupErr, err})
433+
}
434+
// fail the container so that it restart and re-try this process.
435+
log.Fatalln("failed to setup backup process. Reason: ", setupErr.Error())
436+
}
437+
438+
func (c *BackupSessionController) handleBackupSetupSuccess(backupConfiguration *api_v1beta1.BackupConfiguration) {
439+
c.Metrics.Labels = append(c.Metrics.Labels, fmt.Sprintf("BackupConfiguration=%s", backupConfiguration.Name))
440+
if backupConfiguration.Spec.Target != nil {
441+
c.Metrics.Labels = append(c.Metrics.Labels, fmt.Sprintf("kind=%s", backupConfiguration.Spec.Target.Ref.Kind))
442+
c.Metrics.Labels = append(c.Metrics.Labels, fmt.Sprintf("name=%s", backupConfiguration.Spec.Target.Ref.Name))
443+
}
444+
// send prometheus metrics
445+
if c.Metrics.Enabled {
446+
err := restic.HandleBackupSetupMetrics(c.Metrics, nil)
447+
if err != nil {
448+
log.Warningln("failed to send prometheus metrics. Reason: ", err.Error())
449+
}
450+
}
451+
}
452+
415453
func (c *BackupSessionController) writeBackupFailureEvent(backupSession *api_v1beta1.BackupSession, host string, err error) {
416454
// write failure event
417455
ref, rerr := reference.GetReference(stash_scheme.Scheme, backupSession)

pkg/cmds/run_backup.go

Lines changed: 2 additions & 4 deletions
Original file line numberDiff line numberDiff line change
@@ -3,7 +3,6 @@ package cmds
33
import (
44
"time"
55

6-
"github.com/appscode/go/log"
76
"github.com/golang/glog"
87
"github.com/spf13/cobra"
98
"k8s.io/client-go/kubernetes"
@@ -52,9 +51,8 @@ func NewCmdRunBackup() *cobra.Command {
5251
con.Recorder = eventer.NewEventRecorder(con.K8sClient, backup.BackupEventComponent)
5352
con.Metrics.JobName = con.BackupConfigurationName
5453
if err = con.RunBackup(); err != nil {
55-
log.Errorln("failed to complete backup. Reason: ", err)
56-
//set BackupSession status "Failed", write event and prometheus metrics
57-
return con.HandleBackupFailure(err)
54+
// send setup failure metrics and fail the container so it restart to re-try
55+
con.HandleBackupSetupFailure(err)
5856
}
5957
return nil
6058
},

pkg/restic/commands.go

Lines changed: 5 additions & 5 deletions
Original file line numberDiff line numberDiff line change
@@ -16,7 +16,7 @@ import (
1616
)
1717

1818
const (
19-
ResticCMD = "/bin/restic_0.9.4"
19+
ResticCMD = "/bin/restic_0.9.5"
2020
IONiceCMD = "/bin/ionice"
2121
NiceCMD = "/bin/nice"
2222
)
@@ -77,7 +77,7 @@ func (w *ResticWrapper) initRepositoryIfAbsent() ([]byte, error) {
7777

7878
func (w *ResticWrapper) backup(path, host string, tags []string) ([]byte, error) {
7979
log.Infoln("Backing up target data")
80-
args := []interface{}{"backup", path}
80+
args := []interface{}{"backup", path, "--quiet", "--json"}
8181
if host != "" {
8282
args = append(args, "--host")
8383
args = append(args, host)
@@ -104,7 +104,7 @@ func (w *ResticWrapper) backupFromStdin(options BackupOptions) ([]byte, error) {
104104
commands = append(commands, options.StdinPipeCommand)
105105
}
106106

107-
args := []interface{}{"backup", "--stdin"}
107+
args := []interface{}{"backup", "--stdin", "--quiet", "--json"}
108108
if options.StdinFileName != "" {
109109
args = append(args, "--stdin-filename")
110110
args = append(args, options.StdinFileName)
@@ -125,7 +125,7 @@ func (w *ResticWrapper) backupFromStdin(options BackupOptions) ([]byte, error) {
125125
func (w *ResticWrapper) cleanup(retentionPolicy v1alpha1.RetentionPolicy) ([]byte, error) {
126126
log.Infoln("Cleaning old snapshots according to retention policy")
127127

128-
args := []interface{}{"forget"}
128+
args := []interface{}{"forget", "--quiet", "--json"}
129129

130130
if retentionPolicy.KeepLast > 0 {
131131
args = append(args, string(v1alpha1.KeepLast))
@@ -252,7 +252,7 @@ func (w *ResticWrapper) stats() ([]byte, error) {
252252
log.Infoln("Reading repository status")
253253
args := w.appendCacheDirFlag([]interface{}{"stats"})
254254
args = w.appendMaxConnectionsFlag(args)
255-
args = append(args, "--mode=raw-data", "--quiet")
255+
args = append(args, "--quiet", "--json")
256256
args = w.appendCaCertFlag(args)
257257

258258
return w.run(Command{Name: ResticCMD, Args: args})

0 commit comments

Comments
 (0)