/
file.go
1726 lines (1520 loc) · 53.3 KB
/
file.go
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
248
249
250
251
252
253
254
255
256
257
258
259
260
261
262
263
264
265
266
267
268
269
270
271
272
273
274
275
276
277
278
279
280
281
282
283
284
285
286
287
288
289
290
291
292
293
294
295
296
297
298
299
300
301
302
303
304
305
306
307
308
309
310
311
312
313
314
315
316
317
318
319
320
321
322
323
324
325
326
327
328
329
330
331
332
333
334
335
336
337
338
339
340
341
342
343
344
345
346
347
348
349
350
351
352
353
354
355
356
357
358
359
360
361
362
363
364
365
366
367
368
369
370
371
372
373
374
375
376
377
378
379
380
381
382
383
384
385
386
387
388
389
390
391
392
393
394
395
396
397
398
399
400
401
402
403
404
405
406
407
408
409
410
411
412
413
414
415
416
417
418
419
420
421
422
423
424
425
426
427
428
429
430
431
432
433
434
435
436
437
438
439
440
441
442
443
444
445
446
447
448
449
450
451
452
453
454
455
456
457
458
459
460
461
462
463
464
465
466
467
468
469
470
471
472
473
474
475
476
477
478
479
480
481
482
483
484
485
486
487
488
489
490
491
492
493
494
495
496
497
498
499
500
501
502
503
504
505
506
507
508
509
510
511
512
513
514
515
516
517
518
519
520
521
522
523
524
525
526
527
528
529
530
531
532
533
534
535
536
537
538
539
540
541
542
543
544
545
546
547
548
549
550
551
552
553
554
555
556
557
558
559
560
561
562
563
564
565
566
567
568
569
570
571
572
573
574
575
576
577
578
579
580
581
582
583
584
585
586
587
588
589
590
591
592
593
594
595
596
597
598
599
600
601
602
603
604
605
606
607
608
609
610
611
612
613
614
615
616
617
618
619
620
621
622
623
624
625
626
627
628
629
630
631
632
633
634
635
636
637
638
639
640
641
642
643
644
645
646
647
648
649
650
651
652
653
654
655
656
657
658
659
660
661
662
663
664
665
666
667
668
669
670
671
672
673
674
675
676
677
678
679
680
681
682
683
684
685
686
687
688
689
690
691
692
693
694
695
696
697
698
699
700
701
702
703
704
705
706
707
708
709
710
711
712
713
714
715
716
717
718
719
720
721
722
723
724
725
726
727
728
729
730
731
732
733
734
735
736
737
738
739
740
741
742
743
744
745
746
747
748
749
750
751
752
753
754
755
756
757
758
759
760
761
762
763
764
765
766
767
768
769
770
771
772
773
774
775
776
777
778
779
780
781
782
783
784
785
786
787
788
789
790
791
792
793
794
795
796
797
798
799
800
801
802
803
804
805
806
807
808
809
810
811
812
813
814
815
816
817
818
819
820
821
822
823
824
825
826
827
828
829
830
831
832
833
834
835
836
837
838
839
840
841
842
843
844
845
846
847
848
849
850
851
852
853
854
855
856
857
858
859
860
861
862
863
864
865
866
867
868
869
870
871
872
873
874
875
876
877
878
879
880
881
882
883
884
885
886
887
888
889
890
891
892
893
894
895
896
897
898
899
900
901
902
903
904
905
906
907
908
909
910
911
912
913
914
915
916
917
918
919
920
921
922
923
924
925
926
927
928
929
930
931
932
933
934
935
936
937
938
939
940
941
942
943
944
945
946
947
948
949
950
951
952
953
954
955
956
957
958
959
960
961
962
963
964
965
966
967
968
969
970
971
972
973
974
975
976
977
978
979
980
981
982
983
984
985
986
987
988
989
990
991
992
993
994
995
996
997
998
999
1000
// Mgmt
// Copyright (C) 2013-2024+ James Shubin and the project contributors
// Written by James Shubin <james@shubin.ca> and the project contributors
//
// This program is free software: you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// This program is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.
//
// You should have received a copy of the GNU General Public License
// along with this program. If not, see <https://www.gnu.org/licenses/>.
//
// Additional permission under GNU GPL version 3 section 7
//
// If you modify this program, or any covered work, by linking or combining it
// with embedded mcl code and modules (and that the embedded mcl code and
// modules which link with this program, contain a copy of their source code in
// the authoritative form) containing parts covered by the terms of any other
// license, the licensors of this program grant you additional permission to
// convey the resulting work. Furthermore, the licensors of this program grant
// the original author, James Shubin, additional permission to update this
// additional permission if he deems it necessary to achieve the goals of this
// additional permission.
package resources
import (
"bytes"
"context"
"crypto/sha256"
"encoding/hex"
"fmt"
"io"
"io/fs"
"os"
"path"
"path/filepath"
"strconv"
"strings"
"sync"
"syscall"
"github.com/purpleidea/mgmt/engine"
"github.com/purpleidea/mgmt/engine/traits"
engineUtil "github.com/purpleidea/mgmt/engine/util"
"github.com/purpleidea/mgmt/lang/funcs/vars"
"github.com/purpleidea/mgmt/lang/interfaces"
"github.com/purpleidea/mgmt/lang/types"
"github.com/purpleidea/mgmt/util"
"github.com/purpleidea/mgmt/util/errwrap"
"github.com/purpleidea/mgmt/util/recwatch"
)
// init registers the file resource kind with the engine, and exposes the
// well-known values of the `state` param to the mcl language as constants
// (const.res.file.state.exists and const.res.file.state.absent).
func init() {
	engine.RegisterResource(KindFile, func() engine.Res { return &FileRes{} })

	// const.res.file.state.exists = "exists"
	// const.res.file.state.absent = "absent"
	vars.RegisterResourceParams(KindFile, map[string]map[string]func() interfaces.Var{
		ParamFileState: {
			FileStateExists: func() interfaces.Var {
				return &types.StrValue{
					V: FileStateExists,
				}
			},
			FileStateAbsent: func() interfaces.Var {
				return &types.StrValue{
					V: FileStateAbsent,
				}
			},
			// TODO: consider removing this field entirely
			"undefined": func() interfaces.Var {
				return &types.StrValue{
					V: FileStateUndefined, // empty string
				}
			},
		},
	})
}
const (
	// KindFile is the kind string used to identify this resource.
	KindFile = "file"

	// ParamFileState is the name of the state field parameter.
	ParamFileState = "state"

	// FileStateExists is the string that represents that the file should
	// be present.
	FileStateExists = "exists"

	// FileStateAbsent is the string that represents that the file should
	// not exist.
	FileStateAbsent = "absent"

	// FileStateUndefined means the file state has not been specified.
	// When undefined, existence is not enforced, only the other params.
	// TODO: consider moving to *string and express this state as a nil.
	FileStateUndefined = ""

	// FileModeAllowAssign specifies whether we only use ugo=rwx style
	// assignment (false) or if we also allow ugo+-rwx style too (true). I
	// think that it's possibly illogical to allow imperative mode
	// specifiers in a declarative language, so let's leave it off for now.
	FileModeAllowAssign = false
)
// FileRes is a file and directory resource. Dirs are defined by names ending in
// a slash.
type FileRes struct {
	traits.Base // add the base methods without re-implementation
	traits.Edgeable
	traits.GraphQueryable // allow others to query this res in the res graph
	//traits.Groupable // TODO: implement this
	traits.Recvable
	traits.Reversible

	init *engine.Init

	// Path, which defaults to the name if not specified, represents the
	// destination path for the file or directory being managed. It must be
	// an absolute path, and as a result must start with a slash.
	Path string `lang:"path" yaml:"path"`

	// Dirname is used to override the path dirname. (The directory
	// portion.)
	Dirname string `lang:"dirname" yaml:"dirname"`

	// Basename is used to override the path basename. (The file portion.)
	Basename string `lang:"basename" yaml:"basename"`

	// State specifies the desired state of the file. It can be either
	// `exists` or `absent`. If you do not specify this, we will not be able
	// to create or remove a file if it might be logical for another
	// param to require that. Instead it will error. This means that this
	// field is not implied by specifying some content or a mode.
	State string `lang:"state" yaml:"state"`

	// Content specifies the file contents to use. If this is nil, they are
	// left undefined. It cannot be combined with the Source or Fragments
	// parameters.
	Content *string `lang:"content" yaml:"content"`

	// Source specifies the source contents for the file resource. It cannot
	// be combined with the Content or Fragments parameters. It must be an
	// absolute path, and it can point to a file or a directory. If it
	// points to a file, then that will be copied through directly. If
	// it points to a directory, then it will copy the directory "rsync
	// style" onto the file destination. As a result, if this is a file,
	// then the main file res must be a file, and if it is a directory, then
	// this must be a directory. To meaningfully copy a full directory, you
	// also need to specify the Recurse parameter, which is currently
	// required. If you want an existing dir to be turned into a file (or
	// vice-versa) instead of erroring, then you'll also need to specify the
	// Force parameter. If source is undefined and the file path is a
	// directory, then a directory will be created. If left undefined, and
	// combined with the Purge option too, then any unmanaged file in this
	// dir will be removed.
	Source string `lang:"source" yaml:"source"`

	// Fragments specifies that the file is built from a list of individual
	// files. If one of the files is a directory, then the list of files in
	// that directory are the fragments to combine. Multiple of these can be
	// used together, although most simple cases will probably only either
	// involve a single directory path or a fixed list of individual files.
	// All paths are absolute and as a result must start with a slash. The
	// directories (if any) must end with a slash as well. This cannot be
	// combined with the Content or Source parameters. If a file with this
	// param is reversed, the reversed file is one that has `Content` set
	// instead. Automatic edges will be added from these fragments. This
	// currently isn't recursive in that if a fragment is a directory, this
	// only searches one level deep at the moment.
	Fragments []string `lang:"fragments" yaml:"fragments"`

	// Owner specifies the file owner. You can specify either the string
	// name, or a string representation of the owner integer uid.
	Owner string `lang:"owner" yaml:"owner"`

	// Group specifies the file group. You can specify either the string
	// name, or a string representation of the group integer gid.
	Group string `lang:"group" yaml:"group"`

	// Mode is the mode of the file as a string representation of the octal
	// form or symbolic form.
	Mode string `lang:"mode" yaml:"mode"`

	// Recurse specifies if you want to work recursively on the resource. It
	// is used when copying a source directory, or to determine if a watch
	// should be recursive or not. When making a directory, this is required
	// if you'd need the parent directories to be made as well. (Analogous
	// to the `mkdir -p` option.)
	// FIXME: There are some unimplemented cases where we should look at it.
	Recurse bool `lang:"recurse" yaml:"recurse"`

	// Force must be set if we want to perform an unusual operation, such as
	// changing a file into a directory or vice-versa.
	Force bool `lang:"force" yaml:"force"`

	// Purge specifies that when true, any unmanaged file in this file
	// directory will be removed. As a result, this file resource must be a
	// directory. This isn't particularly meaningful if you don't also set
	// Recurse to true. This doesn't work with Content or Fragments.
	Purge bool `lang:"purge" yaml:"purge"`

	// sha256sum caches the hash of the managed content between CheckApply
	// runs so we don't have to re-hash an unchanged Content string.
	sha256sum string
}
// getPath returns the actual path to use for this resource. It computes this
// after analysis of the Path, Dirname and Basename values. Dirs end with slash.
// TODO: memoize the result if this seems important.
func (obj *FileRes) getPath() string {
	p := obj.Path
	if p == "" { // fall back to the resource name when no path is given
		p = obj.Name()
	}

	switch {
	case obj.Dirname == "" && obj.Basename == "":
		return p // no overrides at all

	case obj.Dirname == "": // only the basename is overridden
		return util.Dirname(p) + obj.Basename

	case obj.Basename == "": // only the dirname is overridden
		return obj.Dirname + util.Basename(p)

	default: // both overridden, the original path is irrelevant
		return obj.Dirname + obj.Basename
	}
}
// isDir is a helper function to specify whether the path should be a dir.
// By convention, directory paths carry a trailing slash.
func (obj *FileRes) isDir() bool {
	p := obj.getPath()
	return strings.HasSuffix(p, "/")
}
// mode returns the file permission specified on the graph. It doesn't handle
// the case where the mode is not specified. The caller should check obj.Mode is
// not empty.
func (obj *FileRes) mode() (os.FileMode, error) {
	// Fast path: a plain octal number such as "0644".
	if n, err := strconv.ParseInt(obj.Mode, 8, 32); err == nil {
		return os.FileMode(n), nil
	}

	// Otherwise treat it as symbolic. Symbolic modes are applied relative
	// to the file's current permissions, so stat it first.
	stat, err := os.Stat(obj.getPath())
	if err != nil {
		return os.FileMode(0), errwrap.Wrapf(err, "failed to get the current file mode")
	}

	modes := strings.Split(obj.Mode, ",") // eg: "u=rw,go=r"
	m, err := engineUtil.ParseSymbolicModes(modes, stat.Mode(), FileModeAllowAssign)
	if err != nil {
		return os.FileMode(0), errwrap.Wrapf(err, "mode should be an octal number or symbolic mode (%s)", obj.Mode)
	}

	return os.FileMode(m), nil
}
// Default returns some sensible defaults for this resource.
func (obj *FileRes) Default() engine.Res {
	// NOTE: State must default to FileStateUndefined (the empty string) so
	// that file existence is only managed when explicitly requested.
	return &FileRes{}
}
// Validate reports any problems with the struct definition. It checks path
// sanity, the mutual exclusion of the Content/Source/Fragments params, the
// dir/file agreement between path and Source, the Recurse/Purge/Force
// interactions, and that Owner, Group and Mode resolve on this platform.
func (obj *FileRes) Validate() error {
	if obj.getPath() == "" {
		return fmt.Errorf("path is empty")
	}
	if obj.Dirname != "" && !strings.HasSuffix(obj.Dirname, "/") {
		return fmt.Errorf("dirname must end with a slash")
	}
	if strings.HasPrefix(obj.Basename, "/") {
		return fmt.Errorf("basename must not start with a slash")
	}
	if !strings.HasPrefix(obj.getPath(), "/") {
		return fmt.Errorf("resultant path must be absolute")
	}

	if obj.State != FileStateExists && obj.State != FileStateAbsent && obj.State != FileStateUndefined {
		return fmt.Errorf("the State is invalid")
	}

	// Content, Source and Fragments are three mutually exclusive ways of
	// specifying the file data, so at most one of them may be set.
	isContent := obj.Content != nil
	isSrc := obj.Source != ""
	isFrag := len(obj.Fragments) > 0
	if (isContent && isSrc) || (isSrc && isFrag) || (isFrag && isContent) {
		return fmt.Errorf("can only specify one of Content, Source, and Fragments")
	}
	if obj.State == FileStateAbsent && (isContent || isSrc || isFrag) {
		return fmt.Errorf("can't specify file Content, Source, or Fragments when State is %s", FileStateAbsent)
	}

	// The path and Source must either both be dirs or both not be.
	srcIsDir := strings.HasSuffix(obj.Source, "/")
	if isSrc && (obj.isDir() != srcIsDir) {
		return fmt.Errorf("the path and Source must either both be dirs or both not be")
	}
	if obj.isDir() && (isContent || isFrag) { // makes no sense
		return fmt.Errorf("can't specify Content or Fragments when creating a Dir")
	}

	// TODO: is this really a requirement that we want to enforce?
	if isSrc && obj.isDir() && srcIsDir && !obj.Recurse {
		return fmt.Errorf("you'll want to Recurse when you have a Source dir to copy")
	}

	// TODO: do we want to enforce this sort of thing?
	if obj.Purge && !obj.Recurse {
		return fmt.Errorf("you'll want to Recurse when you have a Purge to do")
	}

	if isSrc && !obj.isDir() && !srcIsDir && obj.Recurse {
		return fmt.Errorf("you can't recurse when copying a single file")
	}

	for _, frag := range obj.Fragments {
		// absolute paths begin with a slash
		if !strings.HasPrefix(frag, "/") {
			return fmt.Errorf("the frag (`%s`) isn't an absolute path", frag)
		}
	}

	if obj.Purge && (isContent || isFrag) {
		return fmt.Errorf("can't combine Purge with Content or Fragments")
	}
	// XXX: should this work with obj.Purge && obj.Source != "" or not?
	//if obj.Purge && obj.Source != "" {
	//	return fmt.Errorf("can't Purge when Source is specified")
	//}

	// TODO: should we silently ignore these errors or include them?
	//if obj.State == FileStateAbsent && obj.Owner != "" {
	//	return fmt.Errorf("can't specify Owner for an absent file")
	//}
	//if obj.State == FileStateAbsent && obj.Group != "" {
	//	return fmt.Errorf("can't specify Group for an absent file")
	//}
	if obj.Owner != "" || obj.Group != "" {
		// Ownership info is only available when Sys() is a
		// *syscall.Stat_t, so probe the platform support once here.
		fileInfo, err := os.Stat("/") // pick root just to do this test
		if err != nil {
			return fmt.Errorf("can't stat root to get system information")
		}
		_, ok := fileInfo.Sys().(*syscall.Stat_t)
		if !ok {
			return fmt.Errorf("can't set Owner or Group on this platform")
		}
	}
	// Resolve Owner/Group names early so that bad values fail at Validate
	// time instead of in the middle of a CheckApply run.
	if _, err := engineUtil.GetUID(obj.Owner); obj.Owner != "" && err != nil {
		return err
	}

	if _, err := engineUtil.GetGID(obj.Group); obj.Group != "" && err != nil {
		return err
	}

	// TODO: should we silently ignore this error or include it?
	//if obj.State == FileStateAbsent && obj.Mode != "" {
	//	return fmt.Errorf("can't specify Mode for an absent file")
	//}
	if obj.Mode != "" {
		// parse now (octal or symbolic) so bad mode strings fail early
		if _, err := obj.mode(); err != nil {
			return err
		}
	}

	return nil
}
// Init runs some startup code for this resource.
func (obj *FileRes) Init(init *engine.Init) error {
	obj.init = init // keep a handle for logging and engine events

	obj.sha256sum = "" // start with no cached content hash
	return nil
}
// Cleanup is run by the engine to clean up after the resource is done.
func (obj *FileRes) Cleanup() error {
	return nil // nothing to tear down
}
// Watch is the primary listener for this resource and it outputs events. This
// one is a file watcher for files and directories. Modify with caution, it is
// probably important to write some test cases first! If the Watch returns an
// error, it means that something has gone wrong, and it must be restarted. On a
// clean exit it returns nil.
func (obj *FileRes) Watch(ctx context.Context) error {
	// TODO: chan *recwatch.Event instead?
	inputEvents := make(chan recwatch.Event)
	defer close(inputEvents)

	wg := &sync.WaitGroup{}
	defer wg.Wait()

	exit := make(chan struct{})
	// TODO: should this be after (later in the file) than the `defer recWatcher.Close()` ?
	defer close(exit)

	recWatcher, err := recwatch.NewRecWatcher(obj.getPath(), obj.Recurse)
	if err != nil {
		return err
	}
	defer recWatcher.Close()

	// forward relays events from the given watcher into inputEvents until
	// the exit channel closes. It converts a closed events channel into a
	// synthetic error event so the main loop below can report it. This is
	// shared by the Source and Fragments watchers, which previously each
	// had their own identical copy of this loop.
	forward := func(rw *recwatch.RecWatcher) {
		defer wg.Done()
		for {
			// TODO: *recwatch.Event instead?
			var event recwatch.Event
			var ok bool
			var shutdown bool
			select {
			case event, ok = <-rw.Events(): // recv
			case <-exit: // unblock
				return
			}
			if !ok {
				err := fmt.Errorf("channel shutdown")
				event = recwatch.Event{Error: err}
				shutdown = true
			}
			select {
			case inputEvents <- event: // send
				if shutdown { // optimization to free early
					return
				}
			case <-exit: // unblock
				return
			}
		}
	}

	// watch the various inputs to this file resource too!
	if obj.Source != "" {
		recurse := strings.HasSuffix(obj.Source, "/") // isDir
		rw, err := recwatch.NewRecWatcher(obj.Source, recurse)
		if err != nil {
			return err
		}
		defer rw.Close()
		wg.Add(1)
		go forward(rw)
	}

	for _, frag := range obj.Fragments {
		recurse := false // TODO: is it okay for depth==1 dirs?
		//recurse := strings.HasSuffix(frag, "/") // isDir
		rw, err := recwatch.NewRecWatcher(frag, recurse)
		if err != nil {
			return err
		}
		defer rw.Close()
		wg.Add(1)
		go forward(rw)
	}

	obj.init.Running() // when started, notify engine that we're running

	var send = false // send event?
	for {
		if obj.init.Debug {
			obj.init.Logf("watching: %s", obj.getPath()) // attempting to watch...
		}

		select {
		case event, ok := <-recWatcher.Events():
			if !ok { // channel shutdown
				// TODO: Should this be an error? Previously it
				// was a `return nil`, and i'm not sure why...
				//return nil
				return fmt.Errorf("unexpected close")
			}
			if err := event.Error; err != nil {
				return errwrap.Wrapf(err, "unknown %s watcher error", obj)
			}
			if obj.init.Debug { // don't access event.Body if event.Error isn't nil
				obj.init.Logf("event(%s): %v", event.Body.Name, event.Body.Op)
			}
			send = true

		case event, ok := <-inputEvents:
			if !ok {
				return fmt.Errorf("unexpected close")
			}
			if err := event.Error; err != nil {
				return errwrap.Wrapf(err, "unknown %s input watcher error", obj)
			}
			if obj.init.Debug { // don't access event.Body if event.Error isn't nil
				obj.init.Logf("input event(%s): %v", event.Body.Name, event.Body.Op)
			}
			send = true

		case <-ctx.Done(): // closed by the engine to signal shutdown
			return nil
		}

		// do all our event sending all together to avoid duplicate msgs
		if send {
			send = false
			obj.init.Event() // notify engine of an event (this can block)
		}
	}
}
// fileCheckApply is the CheckApply operation for a source and destination file.
// It can accept an io.Reader as the source, which can be a regular file, or it
// can be a bytes Buffer struct. It can take an input sha256 hash to use instead
// of computing the source data hash, and it returns the computed value if this
// function reaches that stage. As usual, it respects the apply action variable,
// and has some symmetry with the main CheckApply function.
func (obj *FileRes) fileCheckApply(ctx context.Context, apply bool, src io.ReadSeeker, dst string, sha256sum string) (string, bool, error) {
	// TODO: does it make sense to switch dst to an io.Writer ?
	// TODO: use obj.Force when dealing with symlinks and other file types!
	if obj.init.Debug {
		obj.init.Logf("fileCheckApply: %v -> %s", src, dst)
	}

	// Only *os.File and *bytes.Reader sources are supported, because we
	// rely on the src being seekable (we may read it twice: hash + copy).
	srcFile, isFile := src.(*os.File)
	_, isBytes := src.(*bytes.Reader) // supports seeking!
	if !isFile && !isBytes {
		return "", false, fmt.Errorf("can't open src as either file or buffer")
	}

	var srcStat os.FileInfo
	if isFile {
		var err error
		srcStat, err = srcFile.Stat()
		if err != nil {
			return "", false, err
		}
		// TODO: deal with symlinks
		if !srcStat.Mode().IsRegular() { // can't copy non-regular files or dirs
			return "", false, fmt.Errorf("non-regular src file: %s (%q)", srcStat.Name(), srcStat.Mode())
		}
	}

	dstFile, err := os.Open(dst)
	if err != nil && !os.IsNotExist(err) { // ignore ErrNotExist errors
		return "", false, err
	}
	// dstClose lets us release the handle early (before rewriting dst
	// below) while the defer still guarantees it closes on every path.
	dstClose := func() error {
		return dstFile.Close() // calling this twice is safe :)
	}
	defer dstClose()
	dstExists := !os.IsNotExist(err)

	// Optimization: we shouldn't be making the file, it happens in
	// stateCheckApply, but we skip doing it there in order to do it here,
	// unless we're undefined, and then we shouldn't force it!
	if !dstExists && obj.State == FileStateUndefined {
		return "", false, err
	}

	dstStat, err := dstFile.Stat()
	if err != nil && dstExists {
		return "", false, err
	}

	if dstExists && dstStat.IsDir() { // oops, dst is a dir, and we want a file...
		if !obj.Force {
			return "", false, fmt.Errorf("can't force dir into file: %s", dst)
		}
		if !apply {
			return "", false, nil
		}

		cleanDst := path.Clean(dst)
		if cleanDst == "" || cleanDst == "/" {
			return "", false, fmt.Errorf("don't want to remove root") // safety
		}
		// FIXME: respect obj.Recurse here...
		// there is a dir here, where we want a file...
		obj.init.Logf("fileCheckApply: removing (force): %s", cleanDst)
		if err := os.RemoveAll(cleanDst); err != nil { // dangerous ;)
			return "", false, err
		}
		dstExists = false // now it's gone!

	} else if err == nil {
		if !dstStat.Mode().IsRegular() {
			return "", false, fmt.Errorf("non-regular dst file: %s (%q)", dstStat.Name(), dstStat.Mode())
		}
		if isFile && os.SameFile(srcStat, dstStat) { // same inode, we're done!
			return "", true, nil
		}
	}

	if dstExists { // if dst doesn't exist, no need to compare hashes
		// hash comparison (efficient because we can cache hash of content str)
		if sha256sum == "" { // cache is invalid
			hash := sha256.New()

			// TODO: file existence test?
			if _, err := io.Copy(hash, src); err != nil {
				return "", false, err
			}
			sha256sum = hex.EncodeToString(hash.Sum(nil))
			// since we re-use this src handler below, it is
			// *critical* to seek to 0, or we'll copy nothing!
			if n, err := src.Seek(0, 0); err != nil || n != 0 {
				return sha256sum, false, err
			}
		}

		// dst hash
		hash := sha256.New()
		if _, err := io.Copy(hash, dstFile); err != nil {
			return "", false, err
		}
		if h := hex.EncodeToString(hash.Sum(nil)); h == sha256sum {
			return sha256sum, true, nil // same!
		}
	}

	// state is not okay, no work done, exit, but without error
	if !apply {
		return sha256sum, false, nil
	}
	if obj.init.Debug {
		obj.init.Logf("fileCheckApply: apply: %v -> %s", src, dst)
	}

	dstClose() // unlock file usage so we can write to it
	// os.Create truncates dst if it already exists, then we stream src in.
	dstFile, err = os.Create(dst)
	if err != nil {
		return sha256sum, false, err
	}
	defer dstFile.Close() // TODO: is this redundant because of the earlier defered Close() ?

	if isFile { // set mode because it's a new file
		if err := dstFile.Chmod(srcStat.Mode()); err != nil {
			return sha256sum, false, err
		}
	}

	// TODO: attempt to reflink with Splice() and int(file.Fd()) as input...
	// syscall.Splice(rfd int, roff *int64, wfd int, woff *int64, len int, flags int) (n int64, err error)

	// TODO: should we offer a way to cancel the copy on ^C ?
	if obj.init.Debug {
		obj.init.Logf("fileCheckApply: copy: %v -> %s", src, dst)
	}
	if n, err := io.Copy(dstFile, src); err != nil {
		return sha256sum, false, err
	} else if obj.init.Debug {
		obj.init.Logf("fileCheckApply: copied: %v", n)
	}
	return sha256sum, false, dstFile.Sync()
}
// dirCheckApply is the CheckApply operation for an empty directory. It makes
// the directory at getPath if needed, replacing an in-the-way file only when
// Force is set.
func (obj *FileRes) dirCheckApply(ctx context.Context, apply bool) (bool, error) {
	p := obj.getPath()

	// check if the path exists and is a directory
	fileInfo, err := os.Stat(p)
	if err != nil && !os.IsNotExist(err) {
		return false, errwrap.Wrapf(err, "stat error on file resource")
	}
	exists := err == nil

	if exists && fileInfo.IsDir() {
		return true, nil // already a directory, nothing to do
	}
	if exists && !obj.Force { // a non-dir is in the way
		return false, fmt.Errorf("can't force file into dir: %s", p)
	}

	if !apply {
		return false, nil
	}

	// a non-dir exists at the path; Force was verified above, remove it
	if exists {
		obj.init.Logf("dirCheckApply: removing (force): %s", p)
		if err := os.Remove(p); err != nil {
			return false, err
		}
	}

	// create the empty directory, using Mode if one was specified
	mode := os.ModePerm
	if obj.Mode != "" {
		m, err := obj.mode()
		if err != nil {
			return false, err
		}
		mode = m
	}

	if obj.Recurse {
		// TODO: add recurse limit here
		return false, os.MkdirAll(p, mode)
	}
	return false, os.Mkdir(p, mode)
}
// syncCheckApply is the CheckApply operation for a source and destination dir.
// It is recursive and can create directories directly, and files via the usual
// fileCheckApply method. It returns checkOK and error as is normally expected.
// If excludes is specified, none of those files there will be deleted by this,
// with the exception that a sync *can* convert a file to a dir, or vice-versa.
func (obj *FileRes) syncCheckApply(ctx context.Context, apply bool, src, dst string, excludes []string) (bool, error) {
	if obj.init.Debug {
		obj.init.Logf("syncCheckApply: %s -> %s", src, dst)
	}
	// an src of "" is now supported, if dst is a dir
	if dst == "" {
		return false, fmt.Errorf("the src and dst must not be empty")
	}

	checkOK := true
	// TODO: handle ./ cases or ../ cases that need cleaning ?

	// Trailing slashes mark directories; src and dst must agree on kind.
	srcIsDir := strings.HasSuffix(src, "/")
	dstIsDir := strings.HasSuffix(dst, "/")

	if srcIsDir != dstIsDir && src != "" {
		return false, fmt.Errorf("the src and dst must be both either files or directories")
	}
	if src == "" && !dstIsDir {
		return false, fmt.Errorf("dst must be a dir if we have an empty src")
	}

	// Base case: a single file copy, delegated to fileCheckApply.
	if !srcIsDir && !dstIsDir && src != "" {
		if obj.init.Debug {
			obj.init.Logf("syncCheckApply: %s -> %s", src, dst)
		}
		fin, err := os.Open(src)
		if err != nil {
			if obj.init.Debug && os.IsNotExist(err) { // if we get passed an empty src
				obj.init.Logf("syncCheckApply: missing src: %s", src)
			}
			return false, err
		}

		_, checkOK, err := obj.fileCheckApply(ctx, apply, fin, dst, "")
		if err != nil {
			fin.Close()
			return false, err
		}
		return checkOK, fin.Close()
	}
	// else: if srcIsDir && dstIsDir

	// Build relPath -> FileInfo maps for both sides; a missing src dir
	// simply yields an empty map, which turns this into a pure purge pass.
	smartSrc := make(map[string]FileInfo)
	if src != "" {
		srcFiles, err := ReadDir(src)          // if src does not exist...
		if err != nil && !os.IsNotExist(err) { // an empty map comes out below!
			return false, err
		}
		smartSrc = mapPaths(srcFiles)
		obj.init.Logf("syncCheckApply: srcFiles: %v", srcFiles)
	}

	dstFiles, err := ReadDir(dst)
	if err != nil && !os.IsNotExist(err) {
		return false, err
	}
	smartDst := mapPaths(dstFiles)
	obj.init.Logf("syncCheckApply: dstFiles: %v", dstFiles)

	// Pass 1: walk the src entries, creating/updating the dst side. Every
	// entry handled here is removed from smartDst, so whatever remains in
	// smartDst afterwards is unmanaged and a candidate for removal.
	for relPath, fileInfo := range smartSrc {
		absSrc := fileInfo.AbsPath // absolute path
		absDst := dst + relPath    // absolute dest

		if _, exists := smartDst[relPath]; !exists {
			if fileInfo.IsDir() {
				if !apply { // only checking and not identical!
					return false, nil
				}

				// file exists, but we want a dir: we need force
				// we check for the file w/o the smart dir slash
				relPathFile := strings.TrimSuffix(relPath, "/")
				if _, ok := smartDst[relPathFile]; ok {
					absCleanDst := path.Clean(absDst)
					// TODO: can we fail this before `!apply`?
					if !obj.Force {
						return false, fmt.Errorf("can't force file into dir: %s", absCleanDst)
					}
					if absCleanDst == "" || absCleanDst == "/" {
						return false, fmt.Errorf("don't want to remove root") // safety
					}
					obj.init.Logf("syncCheckApply: removing (force): %s", absCleanDst)
					if err := os.Remove(absCleanDst); err != nil {
						return false, err
					}
					delete(smartDst, relPathFile) // rm from purge list
				}

				if obj.init.Debug {
					obj.init.Logf("syncCheckApply: mkdir -m %s '%s'", fileInfo.Mode(), absDst)
				}
				if err := os.Mkdir(absDst, fileInfo.Mode()); err != nil {
					return false, err
				}
				checkOK = false // we did some work
			}
			// if we're a regular file, the recurse will create it
		}

		if obj.init.Debug {
			obj.init.Logf("syncCheckApply: recurse: %s -> %s", absSrc, absDst)
		}
		if obj.Recurse {
			if c, err := obj.syncCheckApply(ctx, apply, absSrc, absDst, excludes); err != nil { // recurse
				return false, errwrap.Wrapf(err, "syncCheckApply: recurse failed")
			} else if !c { // don't let subsequent passes make this true
				checkOK = false
			}
		}

		if !apply && !checkOK { // check failed, and no apply to do, so exit!
			return false, nil
		}
		delete(smartDst, relPath) // rm from purge list
	}

	if !apply && len(smartDst) > 0 { // we know there are files to remove!
		return false, nil // so just exit now
	}

	// isExcluded specifies if the path is part of an excluded path. For
	// example, if we exclude /tmp/foo/bar from deletion, then we don't want
	// to delete /tmp/foo/bar *or* /tmp/foo/ *or* /tmp/ b/c they're parents.
	isExcluded := func(p string) bool {
		for _, x := range excludes {
			if util.HasPathPrefix(x, p) {
				return true
			}
		}
		return false
	}

	// Pass 2: any files that now remain in smartDst need to be removed...
	for relPath, fileInfo := range smartDst {
		absSrc := src + relPath    // absolute dest (should not exist!)
		absDst := fileInfo.AbsPath // absolute path (should get removed)
		absCleanDst := path.Clean(absDst)
		if absCleanDst == "" || absCleanDst == "/" {
			return false, fmt.Errorf("don't want to remove root") // safety
		}

		// FIXME: respect obj.Recurse here...

		// NOTE: we could use os.RemoveAll instead of recursing, but I
		// think the symmetry is more elegant and correct here for now
		// Avoiding this is also useful if we had a recurse limit arg!
		// NOTE(review): `if true` deliberately selects the RemoveAll
		// branch; the recursive-removal alternative below is kept as a
		// commented-out future option.
		if true { // switch
			if isExcluded(absDst) { // skip removing excluded files
				continue
			}
			obj.init.Logf("syncCheckApply: removing: %s", absCleanDst)
			if apply {
				if err := os.RemoveAll(absCleanDst); err != nil { // dangerous ;)
					return false, err
				}
				checkOK = false
			}
			continue
		}
		_ = absSrc

		//obj.init.Logf("syncCheckApply: recurse rm: %s -> %s", absSrc, absDst)
		//if c, err := obj.syncCheckApply(ctx, apply, absSrc, absDst, excludes); err != nil {
		//	return false, errwrap.Wrapf(err, "syncCheckApply: recurse rm failed")
		//} else if !c { // don't let subsequent passes make this true
		//	checkOK = false
		//}
		//if isExcluded(absDst) { // skip removing excluded files
		//	continue
		//}
		//obj.init.Logf("syncCheckApply: removing: %s", absCleanDst)
		//if apply { // safety
		//	if err := os.Remove(absCleanDst); err != nil {
		//		return false, err
		//	}
		//	checkOK = false
		//}
	}

	return checkOK, nil
}
// stateCheckApply performs a CheckApply of the file state to create or remove
// an empty file or directory.
func (obj *FileRes) stateCheckApply(ctx context.Context, apply bool) (bool, error) {
	if obj.State == FileStateUndefined { // state is not specified
		return true, nil // nothing to enforce
	}

	p := obj.getPath()
	_, err := os.Stat(p)
	if err != nil && !os.IsNotExist(err) {
		return false, errwrap.Wrapf(err, "could not stat file")
	}
	missing := os.IsNotExist(err)

	// already in the desired state?
	switch {
	case obj.State == FileStateAbsent && missing:
		return true, nil
	case obj.State == FileStateExists && err == nil:
		return true, nil
	}

	// state is not okay, no work done, exit, but without error
	if !apply {
		return false, nil
	}

	if obj.State == FileStateAbsent { // remove
		if p == "" {
			// programming error?
			return false, fmt.Errorf("can't remove empty path") // safety
		}
		if p == "/" {
			return false, fmt.Errorf("don't want to remove root") // safety
		}
		obj.init.Logf("stateCheckApply: removing: %s", p)
		// TODO: add recurse limit here
		if obj.Recurse {
			return false, os.RemoveAll(p) // dangerous ;)
		}
		return false, os.Remove(p)
	}

	// we need to make a file or a directory now
	if obj.isDir() {
		return obj.dirCheckApply(ctx, apply)
	}

	// Optimization: we shouldn't even look at obj.Content here, but we can
	// skip this empty file creation here since we know we're going to be
	// making it there anyways. This way we save the extra fopen noise.
	if obj.Content != nil || len(obj.Fragments) > 0 {
		return false, nil // pretend we actually made it
	}

	// Create an empty file to ensure one exists. Don't O_TRUNC it, in case
	// one is magically created right after our exists test. The chmod used
	// is what is used by the os.Create function.
	// TODO: is using O_EXCL okay?
	f, err := os.OpenFile(p, os.O_WRONLY|os.O_CREATE|os.O_EXCL, 0666)
	if err != nil {
		return false, errwrap.Wrapf(err, "problem creating empty file")
	}
	if err := f.Close(); err != nil {
		return false, errwrap.Wrapf(err, "problem closing empty file")
	}

	return false, nil // defer the Content != nil work to later...
}
// contentCheckApply performs a CheckApply for the file content.
func (obj *FileRes) contentCheckApply(ctx context.Context, apply bool) (bool, error) {
if obj.init.Debug {
obj.init.Logf("contentCheckApply(%t)", apply)
}
// content is not defined, leave it alone...
if obj.Content == nil {
return true, nil