Skip to content

Commit

Permalink
indentation issues fixed (unfaithful vim!)
Browse files Browse the repository at this point in the history
  • Loading branch information
Dabz committed Mar 7, 2016
1 parent c2e9f78 commit 08ec120
Show file tree
Hide file tree
Showing 12 changed files with 975 additions and 975 deletions.
108 changes: 54 additions & 54 deletions src/github.com/Dabz/mongobackup/backup.go
Expand Up @@ -5,15 +5,15 @@
** Login gaspar_d <d.gasparina@gmail.com>
**
** Started on Wed 23 Dec 17:39:06 2015 gaspar_d
** Last update Wed 6 Jan 07:43:02 2016 gaspar_d
** Last update Mon 7 Mar 16:52:38 2016 gaspar_d
*/

package mongobackup

import (
"os"
"time"
"strconv"
"strconv"
"gopkg.in/mgo.v2/bson"
)

Expand All @@ -23,7 +23,7 @@ import (
func (e *BackupEnv) PerformBackup() {
backupId := strconv.Itoa(e.homeval.content.Sequence)
e.backupdirectory = e.Options.Directory + "/" + backupId;
e.ensureSecondary();
e.ensureSecondary();

if (! e.Options.Incremental) {
e.performFullBackup(backupId);
Expand All @@ -37,7 +37,7 @@ func (e *BackupEnv) PerformBackup() {
// by default will lock the db with fsyncLock
// will perform a rs.stepDown() if the node is primary
func (e *BackupEnv) performFullBackup(backupId string) {
newEntry := BackupEntry{}
newEntry := BackupEntry{}
e.fetchDBPath();
e.info.Printf("Performing full backup of: %s", e.dbpath);

Expand All @@ -58,48 +58,48 @@ func (e *BackupEnv) performFullBackup(backupId string) {
os.Exit(1);
}

/* Dumping oplog for PIT recovery */
firstOplogEntries := e.getOplogFirstEntries()["ts"].(bson.MongoTimestamp)
/* Dumping oplog for PIT recovery */
firstOplogEntries := e.getOplogFirstEntries()["ts"].(bson.MongoTimestamp)

if (firstOplogEntries > e.homeval.lastOplog) {
e.warning.Printf("Can not find a common point in the oplog")
e.warning.Printf("point in time restore is not available before this backup")
newEntry.LastOplog = e.getOplogLastEntries()["ts"].(bson.MongoTimestamp)
newEntry.FirstOplog = firstOplogEntries
newEntry.LastOplog = e.getOplogLastEntries()["ts"].(bson.MongoTimestamp)
newEntry.FirstOplog = firstOplogEntries
} else {
cursor := e.getOplogEntries(e.homeval.lastOplog)
err, _, fop, lop := e.BackupOplogToDir(cursor, e.backupdirectory)

if (err != nil) {
e.error.Printf("Error while dumping oplog to %s (%s)", e.backupdirectory, err)
e.CleanupBackupEnv()
os.Exit(1)
}

newEntry.LastOplog = lop
newEntry.FirstOplog = fop
}

newEntry.Id = backupId
newEntry.Ts = time.Now()
newEntry.Source = e.dbpath
newEntry.Dest = e.backupdirectory
newEntry.Kind = e.Options.Kind
newEntry.Type = "full"
newEntry.Compress = e.Options.Compress
e.homeval.AddNewEntry(newEntry)
e.homeval.Flush()
cursor := e.getOplogEntries(e.homeval.lastOplog)
err, _, fop, lop := e.BackupOplogToDir(cursor, e.backupdirectory)

if (err != nil) {
e.error.Printf("Error while dumping oplog to %s (%s)", e.backupdirectory, err)
e.CleanupBackupEnv()
os.Exit(1)
}

newEntry.LastOplog = lop
newEntry.FirstOplog = fop
}

newEntry.Id = backupId
newEntry.Ts = time.Now()
newEntry.Source = e.dbpath
newEntry.Dest = e.backupdirectory
newEntry.Kind = e.Options.Kind
newEntry.Type = "full"
newEntry.Compress = e.Options.Compress
e.homeval.AddNewEntry(newEntry)
e.homeval.Flush()


e.info.Printf("Success, %fGB of data has been saved in %s", sizeGb, e.backupdirectory);

/* End of critical path */
if (e.Options.Fsynclock) {
e.info.Printf("Unlocking the database")
if (e.mongoFsyncUnLock() != nil) {
e.CleanupBackupEnv();
os.Exit(1);
}
if (e.mongoFsyncUnLock() != nil) {
e.CleanupBackupEnv();
os.Exit(1);
}
}
}

Expand All @@ -122,7 +122,7 @@ func (e *BackupEnv) perforIncrementalBackup(backupId string) {
e.error.Printf("Can not find a common point in the oplog");
e.error.Printf("You must perform a full backup");

e.CleanupBackupEnv()
e.CleanupBackupEnv()
os.Exit(1);
}

Expand All @@ -135,25 +135,25 @@ func (e *BackupEnv) perforIncrementalBackup(backupId string) {
os.Exit(1)
}

firstOplogEntries = e.getOplogFirstEntries()["ts"].(bson.MongoTimestamp);
if firstOplogEntries > lastSavedOplog {
e.warning.Printf("Possible gap in the oplog, last known entry has been reached during the operation")
e.warning.Printf("if this message appears often, please consider increasing the oplog size")
e.warning.Printf("https://docs.mongodb.org/manual/tutorial/change-oplog-size/")
}

newEntry := BackupEntry{}
newEntry.Id = backupId
newEntry.Ts = time.Now()
newEntry.Source = e.Options.Mongohost
newEntry.Dest = e.backupdirectory
newEntry.Kind = e.Options.Kind
newEntry.Type = "inc"
newEntry.LastOplog = lop
newEntry.FirstOplog = fop
newEntry.Compress = e.Options.Compress
e.homeval.AddNewEntry(newEntry)
e.homeval.Flush()
firstOplogEntries = e.getOplogFirstEntries()["ts"].(bson.MongoTimestamp);
if firstOplogEntries > lastSavedOplog {
e.warning.Printf("Possible gap in the oplog, last known entry has been reached during the operation")
e.warning.Printf("if this message appears often, please consider increasing the oplog size")
e.warning.Printf("https://docs.mongodb.org/manual/tutorial/change-oplog-size/")
}

newEntry := BackupEntry{}
newEntry.Id = backupId
newEntry.Ts = time.Now()
newEntry.Source = e.Options.Mongohost
newEntry.Dest = e.backupdirectory
newEntry.Kind = e.Options.Kind
newEntry.Type = "inc"
newEntry.LastOplog = lop
newEntry.FirstOplog = fop
newEntry.Compress = e.Options.Compress
e.homeval.AddNewEntry(newEntry)
e.homeval.Flush()

e.info.Printf("Success, %fMB of data has been saved in %s", size / (1024*1024), e.backupdirectory);
}
108 changes: 54 additions & 54 deletions src/github.com/Dabz/mongobackup/copy.go
Expand Up @@ -5,17 +5,17 @@
** Login gaspar_d <d.gasparina@gmail.com>
**
** Started on Thu 24 Dec 23:43:24 2015 gaspar_d
** Last update Wed 6 Jan 20:05:17 2016 gaspar_d
** Last update Mon 7 Mar 16:52:44 2016 gaspar_d
*/

package mongobackup

import (
"os"
"io"
"strings"
"strings"
"github.com/pierrec/lz4"
"github.com/Dabz/utils"
"github.com/Dabz/utils"
)


Expand Down Expand Up @@ -49,14 +49,14 @@ func (e *BackupEnv) CopyFile(source string, dest string) (err error, backedByte
}

_, err = io.Copy(destfile, sourcefile)
if err != nil {
return err, 0
}
if err != nil {
return err, 0
}

sourceinfo, err := os.Stat(source);
if err != nil {
return err, 0
}
sourceinfo, err := os.Stat(source);
if err != nil {
return err, 0
}

return nil, sourceinfo.Size();
}
Expand Down Expand Up @@ -128,13 +128,13 @@ func (e *BackupEnv) recCopyDir(source string, dest string, backedByte int64, tot
err, backedByte = e.recCopyDir(sourcefilepointer, destinationfilepointer, backedByte, totalSize, pb)
if err != nil {
e.error.Println(err)
return err, 0
return err, 0
}
} else {
err, size := e.CopyFile(sourcefilepointer, destinationfilepointer);
if err != nil {
e.error.Println(err);
return err, 0
return err, 0
}
backedByte = backedByte + size;
pb.Show(float32(backedByte) / float32(totalSize))
Expand All @@ -150,82 +150,82 @@ func (e *BackupEnv) RestoreCopyDir(entry *BackupEntry, source string, dest strin
directory, _ := os.Open(source)
objects, err := directory.Readdir(-1)

if err != nil {
return err, 0
}
if err != nil {
return err, 0
}

for _, obj := range objects {
sourcefilepointer := source + "/" + obj.Name()
destinationfilepointer := dest + "/" + obj.Name()
if entry.Compress {
destinationfilepointer = strings.TrimSuffix(destinationfilepointer, ".lz4")
}
if entry.Compress {
destinationfilepointer = strings.TrimSuffix(destinationfilepointer, ".lz4")
}

if obj.IsDir() {
err,restoredByte = e.RestoreCopyDir(entry, sourcefilepointer, destinationfilepointer, restoredByte, totalRestored, pb)
err,restoredByte = e.RestoreCopyDir(entry, sourcefilepointer, destinationfilepointer, restoredByte, totalRestored, pb)
if err != nil {
e.error.Println(err)
return err, 0
}
} else {
err, byteSource := e.RestoreCopyFile(sourcefilepointer, destinationfilepointer, entry)
restoredByte += byteSource
pb.Show(float32(restoredByte) / float32(totalRestored))
return err, 0
}
} else {
err, byteSource := e.RestoreCopyFile(sourcefilepointer, destinationfilepointer, entry)
restoredByte += byteSource
pb.Show(float32(restoredByte) / float32(totalRestored))
if err != nil {
e.error.Println(err)
return err, 0
}
}
}
return err, 0
}
}
}

return nil, restoredByte
return nil, restoredByte
}


// Copy & Uncompress a specific file if required
func (e *BackupEnv) RestoreCopyFile(source string, dest string, entry *BackupEntry) (error, int64) {
var (
var (
sourcefile *os.File
destfile *os.File
err error
reader io.Reader
writer io.Writer
)
err error
reader io.Reader
writer io.Writer
)

sourcefile, err = os.Open(source);
if err != nil {
return err, 0;
}
defer sourcefile.Close();

if entry.Compress {
reader = lz4.NewReader(sourcefile)
} else {
reader = sourcefile
}
if entry.Compress {
reader = lz4.NewReader(sourcefile)
} else {
reader = sourcefile
}

destfile, err = os.Create(dest);
writer = destfile
if err != nil {
return err, 0;
}
defer destfile.Close();
destfile, err = os.Create(dest);
writer = destfile
if err != nil {
return err, 0;
}
defer destfile.Close();

_, err = io.Copy(writer, reader)
if err != nil {
return err, 0
}
if err != nil {
return err, 0
}

sourceinfo, err := os.Stat(source);
if err != nil {
return err, 0
}
sourceinfo, err := os.Stat(source);
if err != nil {
return err, 0
}

return nil, sourceinfo.Size();
}


// checkIfDirExist reports whether dir exists: it returns nil when os.Stat
// succeeds, or the os.Stat error (e.g. satisfying os.IsNotExist) otherwise.
func (e *BackupEnv) checkIfDirExist(dir string) error {
	_, err := os.Stat(dir)
	return err
}

0 comments on commit 08ec120

Please sign in to comment.