From 46ece9c30bf1070f45e283dda87e273cca77c91a Mon Sep 17 00:00:00 2001 From: Christopher Schinnerl Date: Tue, 29 May 2018 15:18:16 -0400 Subject: [PATCH 01/15] add specification for SiaFile --- modules/renter/siafile/siafile.go | 105 ++++++++++++++++++++++++++++++ 1 file changed, 105 insertions(+) create mode 100644 modules/renter/siafile/siafile.go diff --git a/modules/renter/siafile/siafile.go b/modules/renter/siafile/siafile.go new file mode 100644 index 0000000000..795c174961 --- /dev/null +++ b/modules/renter/siafile/siafile.go @@ -0,0 +1,105 @@ +package siafile + +import ( + "crypto" + "os" + "sync" + "time" + + "github.com/NebulousLabs/Sia/modules" + "github.com/NebulousLabs/Sia/types" +) + +type ( + // SiaFile is the disk format for files uploaded to the Sia network. It + // contains all the necessary information to recover a file from its hosts and + // allows for easy constant-time updates of the file without having to read or + // write the whole file. + SiaFile struct { + // metadata is the mostly static metadata of a SiaFile. The reserved + // size of the metadata on disk should always be a multiple of 4kib. + // The metadata is also the only part of the file that is JSON encoded + // and can therefore be easily extended. + metadata Metadata + + // pubKeyTable stores the public keys of the hosts this file's pieces are uploaded to. + // Since multiple pieces from different chunks might be uploaded to the same host, this + // allows us to deduplicate the rather large public keys. + pubKeyTable []types.SiaPublicKey + + // chunks are the chunks the file was split into. + chunks []Chunk + + // utility fields. These are not persisted. + erasureCode modules.ErasureCoder + f *os.File + mu sync.Mutex + uid string + } + + // Metadata is the metadata of a SiaFile and is JSON encoded. 
+ Metadata struct { + version [16]byte // version of the sia file format used + fileSize int64 // total size of the file re + mode os.FileMode // unix filemode of the sia file - uint32 + masterKey crypto.TwofishKey // masterkey used to encrypt pieces + + // following timestamps will be persisted using int64 unix timestamps + modTime time.Time // time of last content modification + changeTime time.Time // time of last metadata modification + accessTime time.Time // time of last access + createTime time.Time // time of file creation + + // chunkHeaderSize is the size of each of the following chunk's metadata. + chunkHeaderSize uint64 + // chunkBodySize is the size of each of the following chunk's bodies. + chunkBodySize uint64 + + // The following fields are the offsets for data that is written to disk + // after the pubKeyTable. We reserve a generous amount of space for the + // table and extra fields, but we need to remember those offsets in case we + // need to resize later on. + // + // chunkOffset is the offset of the first chunk, forced to be a factor of + // 4096, default 16kib + // + // pubKeyTableOffset is the office of the publicKeyTable within the + // file. + // + chunkOffset int64 + pubKeyTableOffset int64 + } + + // Chunk represents a single chunk of a file on disk + Chunk struct { + // erasure code settings. + // + // erasureCodeType specifies the algorithm used for erasure coding + // chunks. Available types are: + // 0 - Invalid / Missing Code + // 1 - Reed Solomon Code + // + // erasureCodeParams specifies possible parameters for a certain + // erasureCodeType. Currently params will be parsed as follows: + // Reed Solomon Code - 4 bytes dataPieces / 4 bytes parityPieces + // + erasureCodeType [4]byte + erasureCodeParams [8]byte + + // extensionInfo is some reserved space for each chunk that allows us + // to indicate if a chunk is special. + extensionInfo [16]byte + + // pieces are the pieces of the file the chunk consists of. 
+ // The number of pieces should equal the number of + // dataPieces + parityPieces + pieces []Piece + } + + // Piece represents a single piece of a chunk on disk + Piece struct { + keyNonce [4]byte // nonce used for encrypting the piece + pubKeyOff uint16 // offset in the pubKeyTable + root crypto.Hash // merkle root of the piece + } +) From 1e7af24063dd859c974e291f6a76f6a1ceadc558 Mon Sep 17 00:00:00 2001 From: Christopher Schinnerl Date: Tue, 29 May 2018 16:49:07 -0400 Subject: [PATCH 02/15] remove unused methods --- modules/renter/persist.go | 117 --------------------------------- modules/renter/persist_test.go | 20 ------ 2 files changed, 137 deletions(-) diff --git a/modules/renter/persist.go b/modules/renter/persist.go index 907ee24144..2d1ab6e8e4 100644 --- a/modules/renter/persist.go +++ b/modules/renter/persist.go @@ -10,11 +10,9 @@ import ( "path/filepath" "strconv" - "github.com/NebulousLabs/Sia/build" "github.com/NebulousLabs/Sia/encoding" "github.com/NebulousLabs/Sia/modules" "github.com/NebulousLabs/Sia/persist" - "github.com/NebulousLabs/Sia/types" ) const ( @@ -58,121 +56,6 @@ type ( } ) -// MarshalSia implements the encoding.SiaMarshaller interface, writing the -// file data to w. -func (f *file) MarshalSia(w io.Writer) error { - enc := encoding.NewEncoder(w) - - // encode easy fields - err := enc.EncodeAll( - f.name, - f.size, - f.masterKey, - f.pieceSize, - f.mode, - ) - if err != nil { - return err - } - // COMPATv0.4.3 - encode the bytesUploaded and chunksUploaded fields - // TODO: the resulting .sia file may confuse old clients. 
- err = enc.EncodeAll(f.pieceSize*f.numChunks()*uint64(f.erasureCode.NumPieces()), f.numChunks()) - if err != nil { - return err - } - - // encode erasureCode - switch code := f.erasureCode.(type) { - case *rsCode: - err = enc.EncodeAll( - "Reed-Solomon", - uint64(code.dataPieces), - uint64(code.numPieces-code.dataPieces), - ) - if err != nil { - return err - } - default: - if build.DEBUG { - panic("unknown erasure code") - } - return errors.New("unknown erasure code") - } - // encode contracts - if err := enc.Encode(uint64(len(f.contracts))); err != nil { - return err - } - for _, c := range f.contracts { - if err := enc.Encode(c); err != nil { - return err - } - } - return nil -} - -// UnmarshalSia implements the encoding.SiaUnmarshaller interface, -// reconstructing a file from the encoded bytes read from r. -func (f *file) UnmarshalSia(r io.Reader) error { - dec := encoding.NewDecoder(r) - - // COMPATv0.4.3 - decode bytesUploaded and chunksUploaded into dummy vars. - var bytesUploaded, chunksUploaded uint64 - - // Decode easy fields. - err := dec.DecodeAll( - &f.name, - &f.size, - &f.masterKey, - &f.pieceSize, - &f.mode, - &bytesUploaded, - &chunksUploaded, - ) - if err != nil { - return err - } - f.staticUID = persist.RandomSuffix() - - // Decode erasure coder. - var codeType string - if err := dec.Decode(&codeType); err != nil { - return err - } - switch codeType { - case "Reed-Solomon": - var nData, nParity uint64 - err = dec.DecodeAll( - &nData, - &nParity, - ) - if err != nil { - return err - } - rsc, err := NewRSCode(int(nData), int(nParity)) - if err != nil { - return err - } - f.erasureCode = rsc - default: - return errors.New("unrecognized erasure code type: " + codeType) - } - - // Decode contracts. 
- var nContracts uint64 - if err := dec.Decode(&nContracts); err != nil { - return err - } - f.contracts = make(map[types.FileContractID]fileContract) - var contract fileContract - for i := uint64(0); i < nContracts; i++ { - if err := dec.Decode(&contract); err != nil { - return err - } - f.contracts[contract.ID] = contract - } - return nil -} - // saveFile saves a file to the renter directory. func (r *Renter) saveFile(f *file) error { if f.deleted { diff --git a/modules/renter/persist_test.go b/modules/renter/persist_test.go index f8352ca4c4..26fb214c7f 100644 --- a/modules/renter/persist_test.go +++ b/modules/renter/persist_test.go @@ -1,7 +1,6 @@ package renter import ( - "bytes" "fmt" "os" "path/filepath" @@ -54,25 +53,6 @@ func equalFiles(f1, f2 *file) error { return nil } -// TestFileMarshalling tests the MarshalSia and UnmarshalSia functions of the -// file type. -func TestFileMarshalling(t *testing.T) { - savedFile := newTestingFile() - buf := new(bytes.Buffer) - savedFile.MarshalSia(buf) - - loadedFile := new(file) - err := loadedFile.UnmarshalSia(buf) - if err != nil { - t.Fatal(err) - } - - err = equalFiles(savedFile, loadedFile) - if err != nil { - t.Fatal(err) - } -} - // TestFileShareLoad tests the sharing/loading functions of the renter. 
func TestFileShareLoad(t *testing.T) { if testing.Short() { From 0c0c7155f2885ded6dfc69b864ae45bcb2cb8e67 Mon Sep 17 00:00:00 2001 From: Christopher Schinnerl Date: Mon, 18 Jun 2018 14:04:46 -0400 Subject: [PATCH 03/15] Add mode and size functions --- modules/renter/download.go | 10 ++++---- modules/renter/downloadstreamer.go | 4 +-- modules/renter/files.go | 18 +++----------- modules/renter/renter.go | 5 ++-- modules/renter/siafile/metadata.go | 39 ++++++++++++++++++++++++++++++ modules/renter/siafile/siafile.go | 13 +++++----- 6 files changed, 58 insertions(+), 31 deletions(-) create mode 100644 modules/renter/siafile/metadata.go diff --git a/modules/renter/download.go b/modules/renter/download.go index a13c86cda2..a7decadbbb 100644 --- a/modules/renter/download.go +++ b/modules/renter/download.go @@ -285,16 +285,16 @@ func (r *Renter) managedDownload(p modules.RenterDownloadParameters) (*download, if p.Destination != "" && !filepath.IsAbs(p.Destination) { return nil, errors.New("destination must be an absolute path") } - if p.Offset == file.size { + if p.Offset == file.Size() { return nil, errors.New("offset equals filesize") } // Sentinel: if length == 0, download the entire file. if p.Length == 0 { - p.Length = file.size - p.Offset + p.Length = file.Size() - p.Offset } // Check whether offset and length is valid. - if p.Offset < 0 || p.Offset+p.Length > file.size { - return nil, fmt.Errorf("offset and length combination invalid, max byte is at index %d", file.size-1) + if p.Offset < 0 || p.Offset+p.Length > file.Size() { + return nil, fmt.Errorf("offset and length combination invalid, max byte is at index %d", file.Size()-1) } // Instantiate the correct downloadWriter implementation. 
@@ -304,7 +304,7 @@ func (r *Renter) managedDownload(p modules.RenterDownloadParameters) (*download, dw = newDownloadDestinationWriteCloserFromWriter(p.Httpwriter) destinationType = "http stream" } else { - osFile, err := os.OpenFile(p.Destination, os.O_CREATE|os.O_WRONLY, os.FileMode(file.mode)) + osFile, err := os.OpenFile(p.Destination, os.O_CREATE|os.O_WRONLY, file.Mode()) if err != nil { return nil, err } diff --git a/modules/renter/downloadstreamer.go b/modules/renter/downloadstreamer.go index d07990e75c..1f71296c8e 100644 --- a/modules/renter/downloadstreamer.go +++ b/modules/renter/downloadstreamer.go @@ -38,7 +38,7 @@ func (r *Renter) Streamer(siaPath string) (string, io.ReadSeeker, error) { lockID := r.mu.RLock() file, exists := r.files[siaPath] r.mu.RUnlock(lockID) - if !exists || file.deleted { + if !exists || file.Deleted() { return "", nil, fmt.Errorf("no file with that path: %s", siaPath) } // Create the streamer @@ -46,7 +46,7 @@ func (r *Renter) Streamer(siaPath string) (string, io.ReadSeeker, error) { file: file, r: r, } - return file.name, s, nil + return file.Name(), s, nil } // Read implements the standard Read interface. It will download the requested diff --git a/modules/renter/files.go b/modules/renter/files.go index 2feb223b6c..44ea95425b 100644 --- a/modules/renter/files.go +++ b/modules/renter/files.go @@ -1,7 +1,6 @@ package renter import ( - "errors" "fmt" "math" "os" @@ -13,6 +12,8 @@ import ( "github.com/NebulousLabs/Sia/modules" "github.com/NebulousLabs/Sia/persist" "github.com/NebulousLabs/Sia/types" + + "github.com/NebulousLabs/errors" ) var ( @@ -262,24 +263,11 @@ func (r *Renter) DeleteFile(nickname string) error { delete(r.files, nickname) delete(r.persist.Tracking, nickname) - err := persist.RemoveFile(filepath.Join(r.persistDir, f.name+ShareExtension)) - if err != nil { - r.log.Println("WARN: couldn't remove file :", err) - } - r.saveSync() r.mu.Unlock(lockID) - // delete the file's associated contract data. 
- f.mu.Lock() - defer f.mu.Unlock() - - // mark the file as deleted - f.deleted = true - // TODO: delete the sectors of the file as well. - - return nil + return errors.AddContext(f.Delete(), "failed to delete file") } // FileList returns all of the files that the renter has. diff --git a/modules/renter/renter.go b/modules/renter/renter.go index bd40764d1c..036b428972 100644 --- a/modules/renter/renter.go +++ b/modules/renter/renter.go @@ -30,6 +30,7 @@ import ( "github.com/NebulousLabs/Sia/modules" "github.com/NebulousLabs/Sia/modules/renter/contractor" "github.com/NebulousLabs/Sia/modules/renter/hostdb" + "github.com/NebulousLabs/Sia/modules/renter/siafile" "github.com/NebulousLabs/Sia/persist" siasync "github.com/NebulousLabs/Sia/sync" "github.com/NebulousLabs/Sia/types" @@ -175,9 +176,7 @@ type trackedFile struct { type Renter struct { // File management. // - // tracking contains a list of files that the user intends to maintain. By - // default, files loaded through sharing are not maintained by the user. - files map[string]*file + files map[string]*siafile.SiaFile // Download management. The heap has a separate mutex because it is always // accessed in isolation. diff --git a/modules/renter/siafile/metadata.go b/modules/renter/siafile/metadata.go new file mode 100644 index 0000000000..16bbc8f55a --- /dev/null +++ b/modules/renter/siafile/metadata.go @@ -0,0 +1,39 @@ +package siafile + +import ( + "os" + + "github.com/NebulousLabs/Sia/types" +) + +// Delete removes the file from disk and marks it as deleted. Once the file is +// deleted, certain methods should return an error. +func (sf *SiaFile) Delete() error { + panic("not implemented yet") +} + +// Deleted indicates if this file has been deleted by the user. +func (sf *SiaFile) Deleted() bool { + panic("not implemented yet") +} + +// HostPublicKeys returns all the public keys of hosts the file has ever been +// uploaded to. That means some of those hosts might no longer be in use. 
+func (sf *SiaFile) HostPublicKeys() []types.SiaPublicKey { + panic("not implemented yet") +} + +// Mode returns the FileMode of the SiaFile. +func (sf *SiaFile) Mode() os.FileMode { + panic("not implemented yet") +} + +// Name returns the file's name. +func (sf *SiaFile) Name() string { + panic("not implemented yet") +} + +// Size returns the file's size. +func (sf *SiaFile) Size() uint64 { + panic("not implemented yet") +} diff --git a/modules/renter/siafile/siafile.go b/modules/renter/siafile/siafile.go index 795c174961..fe2795069b 100644 --- a/modules/renter/siafile/siafile.go +++ b/modules/renter/siafile/siafile.go @@ -1,13 +1,14 @@ package siafile import ( - "crypto" "os" "sync" "time" "github.com/NebulousLabs/Sia/modules" "github.com/NebulousLabs/Sia/types" + + "github.com/NebulousLabs/Sia/crypto" ) type ( @@ -32,17 +33,17 @@ type ( // utility fields. These are not persisted. erasureCode modules.ErasureCoder - f *os.File + filePath string mu sync.Mutex uid string } // Metadata is the metadata of a SiaFile and is JSON encoded. 
Metadata struct { - version [16]byte // version of the sia file format used - fileSize int64 // total size of the file re - mode os.FileMode // unix filemode of the sia file - uint32 - masterKey crypto.TwofishKey // masterkey used to encrypt pieces + version [16]byte // version of the sia file format used + staticFileSize int64 // total size of the file re + mode os.FileMode // unix filemode of the sia file - uint32 + masterKey crypto.TwofishKey // masterkey used to encrypt pieces // following timestamps will be persisted using int64 unix timestamps modTime time.Time // time of last content modification From 4d2046e6456dbf21afdecd784a5ec7c26f496262 Mon Sep 17 00:00:00 2001 From: Christopher Schinnerl Date: Mon, 18 Jun 2018 14:10:42 -0400 Subject: [PATCH 04/15] move metadata to metadata.go --- modules/renter/siafile/metadata.go | 42 ++++++++++++++++++++++++++++++ modules/renter/siafile/siafile.go | 35 ------------------------- 2 files changed, 42 insertions(+), 35 deletions(-) diff --git a/modules/renter/siafile/metadata.go b/modules/renter/siafile/metadata.go index 16bbc8f55a..36064df26d 100644 --- a/modules/renter/siafile/metadata.go +++ b/modules/renter/siafile/metadata.go @@ -2,10 +2,52 @@ package siafile import ( "os" + "time" + "github.com/NebulousLabs/Sia/crypto" "github.com/NebulousLabs/Sia/types" ) +type ( + // Metadata is the metadata of a SiaFile and is JSON encoded. + Metadata struct { + version [16]byte // version of the sia file format used + staticFileSize int64 // total size of the file + masterKey crypto.TwofishKey // masterkey used to encrypt pieces + trackingPath string // file to the local copy of the file used for repairing + + // The following fields are the usual unix timestamps of files. + modTime time.Time // time of last content modification + changeTime time.Time // time of last metadata modification + accessTime time.Time // time of last access + createTime time.Time // time of file creation + + // File ownership/permission fields. 
+ mode os.FileMode // unix filemode of the sia file - uint32 + uid int // id of the user who owns the file + gid int // id of the group that owns the file + + // chunkHeaderSize is the size of each of the following chunk's metadata. + chunkHeaderSize uint64 + // chunkBodySize is the size of each of the following chunk's bodies. + chunkBodySize uint64 + + // The following fields are the offsets for data that is written to disk + // after the pubKeyTable. We reserve a generous amount of space for the + // table and extra fields, but we need to remember those offsets in case we + // need to resize later on. + // + // chunkOffset is the offset of the first chunk, forced to be a factor of + // 4096, default 16kib + // + // pubKeyTableOffset is the office of the publicKeyTable within the + // file. + // + chunkOffset int64 + pubKeyTableOffset int64 + } +) + // Delete removes the file from disk and marks it as deleted. Once the file is // deleted, certain methods should return an error. func (sf *SiaFile) Delete() error { diff --git a/modules/renter/siafile/siafile.go b/modules/renter/siafile/siafile.go index fe2795069b..a81797ab73 100644 --- a/modules/renter/siafile/siafile.go +++ b/modules/renter/siafile/siafile.go @@ -1,9 +1,7 @@ package siafile import ( - "os" "sync" - "time" "github.com/NebulousLabs/Sia/modules" "github.com/NebulousLabs/Sia/types" @@ -38,39 +36,6 @@ type ( uid string } - // Metadata is the metadata of a SiaFile and is JSON encoded. 
- Metadata struct { - version [16]byte // version of the sia file format used - staticFileSize int64 // total size of the file re - mode os.FileMode // unix filemode of the sia file - uint32 - masterKey crypto.TwofishKey // masterkey used to encrypt pieces - - // following timestamps will be persisted using int64 unix timestamps - modTime time.Time // time of last content modification - changeTime time.Time // time of last metadata modification - accessTime time.Time // time of last access - createTime time.Time // time of file creation - - // chunkHeaderSize is the size of each of the following chunk's metadata. - chunkHeaderSize uint64 - // chunkBodySize is the size of each of the following chunk's bodies. - chunkBodySize uint64 - - // The following fields are the offsets for data that is written to disk - // after the pubKeyTable. We reserve a generous amount of space for the - // table and extra fields, but we need to remember those offsets in case we - // need to resize later on. - // - // chunkOffset is the offset of the first chunk, forced to be a factor of - // 4096, default 16kib - // - // pubKeyTableOffset is the office of the publicKeyTable within the - // file. - // - chunkOffset int64 - pubKeyTableOffset int64 - } - // Chunk represents a single chunk of a file on disk Chunk struct { // erasure code settings. 
From 071db4d8d7a00c6adf58c5bd4351007e78af9d47 Mon Sep 17 00:00:00 2001 From: Christopher Schinnerl Date: Mon, 18 Jun 2018 15:02:11 -0400 Subject: [PATCH 05/15] tmp --- modules/renter/files.go | 51 ++++++++++++++++-------------- modules/renter/siafile/metadata.go | 43 +++++++++++++++++++++++-- modules/renter/siafile/siafile.go | 1 - 3 files changed, 69 insertions(+), 26 deletions(-) diff --git a/modules/renter/files.go b/modules/renter/files.go index 44ea95425b..20edbe10fe 100644 --- a/modules/renter/files.go +++ b/modules/renter/files.go @@ -10,6 +10,7 @@ import ( "github.com/NebulousLabs/Sia/build" "github.com/NebulousLabs/Sia/crypto" "github.com/NebulousLabs/Sia/modules" + "github.com/NebulousLabs/Sia/modules/renter/siafile" "github.com/NebulousLabs/Sia/persist" "github.com/NebulousLabs/Sia/types" @@ -334,50 +335,44 @@ func (r *Renter) File(siaPath string) (modules.FileInfo, error) { var fileInfo modules.FileInfo // Get the file and its contracts - contractIDs := make(map[types.FileContractID]struct{}) lockID := r.mu.RLock() - defer r.mu.RUnlock(lockID) file, exists := r.files[siaPath] + r.mu.RUnlock(lockID) if !exists { return fileInfo, ErrUnknownPath } - file.mu.RLock() - defer file.mu.RUnlock() - for cid := range file.contracts { - contractIDs[cid] = struct{}{} - } + pks := file.HostPublicKeys() // Build 2 maps that map every contract id to its offline and goodForRenew // status. 
goodForRenew := make(map[types.FileContractID]bool) offline := make(map[types.FileContractID]bool) - for cid := range contractIDs { - resolvedKey := r.hostContractor.ResolveIDToPubKey(cid) - cu, ok := r.hostContractor.ContractUtility(resolvedKey) + for pk := range pks { + cu, ok := r.hostContractor.ContractUtility(pk) if !ok { continue } goodForRenew[cid] = ok && cu.GoodForRenew - offline[cid] = r.hostContractor.IsOffline(resolvedKey) + offline[cid] = r.hostContractor.IsOffline(pk) } // Build the FileInfo renewing := true var localPath string - tf, exists := r.persist.Tracking[file.name] + tf, exists := r.persist.Tracking[file.Name()] if exists { localPath = tf.RepairPath } fileInfo = modules.FileInfo{ - SiaPath: file.name, + SiaPath: file.SiaPath(), LocalPath: localPath, - Filesize: file.size, + Filesize: file.Size(), Renewing: renewing, - Available: file.available(offline), - Redundancy: file.redundancy(offline, goodForRenew), - UploadedBytes: file.uploadedBytes(), - UploadProgress: file.uploadProgress(), - Expiration: file.expiration(), + Available: file.Available(offline), + Redundancy: file.Redundancy(offline, goodForRenew), + UploadedBytes: file.UploadedBytes(), + UploadProgress: file.UploadProgress(), + Expiration: file.Expiration(), } return fileInfo, nil @@ -406,10 +401,8 @@ func (r *Renter) RenameFile(currentName, newName string) error { } // Modify the file and save it to disk. - file.mu.Lock() - file.name = newName - err = r.saveFile(file) - file.mu.Unlock() + file.Rename(newName) + err = r.saveFile(siaFileToFile(file)) if err != nil { return err } @@ -430,3 +423,15 @@ func (r *Renter) RenameFile(currentName, newName string) error { oldPath := filepath.Join(r.persistDir, currentName+ShareExtension) return os.RemoveAll(oldPath) } + +// fileToSiaFile converts a legacy file to a SiaFile. Fields that can't be +// populated using the legacy file remain blank. 
+func fileToSiaFile(f *file) *siafile.SiaFile { + panic("not implemented yet") +} + +// siaFileToFile converts a SiaFile to a legacy file. Fields that don't exist +// in the legacy file will get lost and therefore not persisted. +func siaFileToFile(sf *siafile.SiaFile) *file { + panic("not implemented yet") +} diff --git a/modules/renter/siafile/metadata.go b/modules/renter/siafile/metadata.go index 36064df26d..64a1a5de6c 100644 --- a/modules/renter/siafile/metadata.go +++ b/modules/renter/siafile/metadata.go @@ -15,6 +15,7 @@ type ( staticFileSize int64 // total size of the file masterKey crypto.TwofishKey // masterkey used to encrypt pieces trackingPath string // file to the local copy of the file used for repairing + siaPath string // The following fields are the usual unix timestamps of files. modTime time.Time // time of last content modification @@ -48,6 +49,11 @@ type ( } ) +// Available indicates whether the file is ready to be downloaded. +func (sf *SiaFile) Available(offline map[types.FileContractID]bool) bool { + panic("not implemented yet") +} + // Delete removes the file from disk and marks it as deleted. Once the file is // deleted, certain methods should return an error. func (sf *SiaFile) Delete() error { @@ -59,6 +65,12 @@ func (sf *SiaFile) Deleted() bool { panic("not implemented yet") } +// Expiration returns the lowest height at which any of the file's contracts +// will expire. +func (sf *SiaFile) Expiration() types.BlockHeight { + panic("not implemented yet") +} + // HostPublicKeys returns all the public keys of hosts the file has ever been // uploaded to. That means some of those hosts might no longer be in use. func (sf *SiaFile) HostPublicKeys() []types.SiaPublicKey { @@ -70,8 +82,21 @@ func (sf *SiaFile) Mode() os.FileMode { panic("not implemented yet") } -// Name returns the file's name. -func (sf *SiaFile) Name() string { +// Redundancy returns the redundancy of the least redundant chunk. 
A file +// becomes available when this redundancy is >= 1. Assumes that every piece is +// unique within a file contract. -1 is returned if the file has size 0. It +// takes one argument, a map of offline contracts for this file. +func (sf *SiaFile) Redundancy(offlineMap map[types.FileContractID]bool, goodForRenewMap map[types.FileContractID]bool) float64 { + panic("not implemented yet") +} + +// Rename changes the name of the file to a new one. +func (sf *SiaFile) Rename(newName string) string { + panic("not implemented yet") +} + +// SiaPath returns the file's sia path. +func (sf *SiaFile) SiaPath() string { panic("not implemented yet") } @@ -79,3 +104,17 @@ func (sf *SiaFile) Name() string { func (sf *SiaFile) Size() uint64 { panic("not implemented yet") } + +// UploadedBytes indicates how many bytes of the file have been uploaded via +// current file contracts. Note that this includes padding and redundancy, so +// uploadedBytes can return a value much larger than the file's original filesize. +func (sf *SiaFile) UploadedBytes() uint64 { + panic("not implemented yet") +} + +// UploadProgress indicates what percentage of the file (plus redundancy) has +// been uploaded. Note that a file may be Available long before UploadProgress +// reaches 100%, and UploadProgress may report a value greater than 100%. +func (sf *SiaFile) UploadProgress() float64 { + panic("not implemented yet") +} diff --git a/modules/renter/siafile/siafile.go b/modules/renter/siafile/siafile.go index a81797ab73..04947f3e96 100644 --- a/modules/renter/siafile/siafile.go +++ b/modules/renter/siafile/siafile.go @@ -31,7 +31,6 @@ type ( // utility fields. These are not persisted. 
erasureCode modules.ErasureCoder - filePath string mu sync.Mutex uid string } From 967f821bf7810a330aa2836130ab8367b2df1169 Mon Sep 17 00:00:00 2001 From: Christopher Schinnerl Date: Mon, 18 Jun 2018 18:08:33 -0400 Subject: [PATCH 06/15] Replace all the occurences of the legacy file with the new file by creating unimplemented methods --- modules/renter/download.go | 57 +++++------ modules/renter/downloadstreamer.go | 15 ++- modules/renter/files.go | 77 +++++++------- modules/renter/files_test.go | 40 +++----- modules/renter/persist.go | 26 +++-- modules/renter/persist_test.go | 95 ++++++++--------- modules/renter/renter.go | 2 +- modules/renter/siafile/metadata.go | 48 +++++++-- modules/renter/siafile/siafile.go | 71 ++++++++++++- modules/renter/upload.go | 4 +- modules/renter/uploadchunk.go | 18 ++-- modules/renter/uploadheap.go | 159 +++++++++++++++-------------- modules/renter/workerupload.go | 27 ++--- 13 files changed, 356 insertions(+), 283 deletions(-) diff --git a/modules/renter/download.go b/modules/renter/download.go index a7decadbbb..7bcac46893 100644 --- a/modules/renter/download.go +++ b/modules/renter/download.go @@ -132,6 +132,7 @@ import ( "time" "github.com/NebulousLabs/Sia/modules" + "github.com/NebulousLabs/Sia/modules/renter/siafile" "github.com/NebulousLabs/Sia/persist" "github.com/NebulousLabs/Sia/types" @@ -178,7 +179,7 @@ type ( destination downloadDestination // The place to write the downloaded data. destinationType string // "file", "buffer", "http stream", etc. destinationString string // The string to report to the user for the destination. - file *file // The file to download. + file *siafile.SiaFile // The file to download. latencyTarget time.Duration // Workers above this latency will be automatically put on standby initially. length uint64 // Length of download. Cannot be 0. 
@@ -352,7 +353,7 @@ func (r *Renter) managedNewDownload(params downloadParams) (*download, error) { if params.offset < 0 { return nil, errors.New("download offset cannot be a negative number") } - if params.offset+params.length > params.file.size { + if params.offset+params.length > params.file.Size() { return nil, errors.New("download is requesting data past the boundary of the file") } @@ -369,7 +370,7 @@ func (r *Renter) managedNewDownload(params downloadParams) (*download, error) { staticLength: params.length, staticOffset: params.offset, staticOverdrive: params.overdrive, - staticSiaPath: params.file.name, + staticSiaPath: params.file.SiaPath(), staticPriority: params.priority, log: r.log, @@ -377,34 +378,26 @@ func (r *Renter) managedNewDownload(params downloadParams) (*download, error) { } // Determine which chunks to download. - minChunk := params.offset / params.file.staticChunkSize() - maxChunk := (params.offset + params.length - 1) / params.file.staticChunkSize() + minChunk := params.offset / params.file.ChunkSize() + maxChunk := (params.offset + params.length - 1) / params.file.ChunkSize() // For each chunk, assemble a mapping from the contract id to the index of // the piece within the chunk that the contract is responsible for. chunkMaps := make([]map[string]downloadPieceInfo, maxChunk-minChunk+1) for i := range chunkMaps { chunkMaps[i] = make(map[string]downloadPieceInfo) - } - params.file.mu.Lock() - for id, contract := range params.file.contracts { - resolvedKey := r.hostContractor.ResolveIDToPubKey(id) - for _, piece := range contract.Pieces { - if piece.Chunk >= minChunk && piece.Chunk <= maxChunk { - // Sanity check - the same worker should not have two pieces for - // the same chunk. 
- _, exists := chunkMaps[piece.Chunk-minChunk][string(resolvedKey.Key)] - if exists { - r.log.Println("ERROR: Worker has multiple pieces uploaded for the same chunk.") - } - chunkMaps[piece.Chunk-minChunk][string(resolvedKey.Key)] = downloadPieceInfo{ - index: piece.Piece, - root: piece.MerkleRoot, - } + for j := uint64(0); j < uint64(params.file.NumPieces()); j++ { + piece, err := params.file.Piece(uint64(i), j) + if err != nil { + return nil, err + } + chunkMaps[i][string(piece.HostPubKey.Key)] = downloadPieceInfo{ + index: j, + root: piece.MerkleRoot, } } + } - params.file.mu.Unlock() // Queue the downloads for each chunk. writeOffset := int64(0) // where to write a chunk within the download destination. @@ -412,14 +405,14 @@ func (r *Renter) managedNewDownload(params downloadParams) (*download, error) { for i := minChunk; i <= maxChunk; i++ { udc := &unfinishedDownloadChunk{ destination: params.destination, - erasureCode: params.file.erasureCode, - masterKey: params.file.masterKey, + erasureCode: params.file.ErasureCode(), + masterKey: params.file.MasterKey(), staticChunkIndex: i, staticCacheID: fmt.Sprintf("%v:%v", d.staticSiaPath, i), staticChunkMap: chunkMaps[i-minChunk], - staticChunkSize: params.file.staticChunkSize(), - staticPieceSize: params.file.pieceSize, + staticChunkSize: params.file.ChunkSize(), + staticPieceSize: params.file.PieceSize(), // TODO: 25ms is just a guess for a good default. 
Really, we want to // set the latency target such that slower workers will pick up the @@ -434,8 +427,8 @@ func (r *Renter) managedNewDownload(params downloadParams) (*download, error) { staticNeedsMemory: params.needsMemory, staticPriority: params.priority, - physicalChunkData: make([][]byte, params.file.erasureCode.NumPieces()), - pieceUsage: make([]bool, params.file.erasureCode.NumPieces()), + physicalChunkData: make([][]byte, params.file.ErasureCode().NumPieces()), + pieceUsage: make([]bool, params.file.ErasureCode().NumPieces()), download: d, staticStreamCache: r.staticStreamCache, @@ -444,16 +437,16 @@ func (r *Renter) managedNewDownload(params downloadParams) (*download, error) { // Set the fetchOffset - the offset within the chunk that we start // downloading from. if i == minChunk { - udc.staticFetchOffset = params.offset % params.file.staticChunkSize() + udc.staticFetchOffset = params.offset % params.file.ChunkSize() } else { udc.staticFetchOffset = 0 } // Set the fetchLength - the number of bytes to fetch within the chunk // that we start downloading from. - if i == maxChunk && (params.length+params.offset)%params.file.staticChunkSize() != 0 { - udc.staticFetchLength = ((params.length + params.offset) % params.file.staticChunkSize()) - udc.staticFetchOffset + if i == maxChunk && (params.length+params.offset)%params.file.ChunkSize() != 0 { + udc.staticFetchLength = ((params.length + params.offset) % params.file.ChunkSize()) - udc.staticFetchOffset } else { - udc.staticFetchLength = params.file.staticChunkSize() - udc.staticFetchOffset + udc.staticFetchLength = params.file.ChunkSize() - udc.staticFetchOffset } // Set the writeOffset within the destination for where the data should // be written. 
diff --git a/modules/renter/downloadstreamer.go b/modules/renter/downloadstreamer.go index 1f71296c8e..18dca3deab 100644 --- a/modules/renter/downloadstreamer.go +++ b/modules/renter/downloadstreamer.go @@ -7,6 +7,7 @@ import ( "math" "time" + "github.com/NebulousLabs/Sia/modules/renter/siafile" "github.com/NebulousLabs/errors" ) @@ -14,7 +15,7 @@ type ( // streamer is a io.ReadSeeker that can be used to stream downloads from // the sia network. streamer struct { - file *file + file *siafile.SiaFile offset int64 r *Renter } @@ -46,7 +47,7 @@ func (r *Renter) Streamer(siaPath string) (string, io.ReadSeeker, error) { file: file, r: r, } - return file.Name(), s, nil + return file.SiaPath(), s, nil } // Read implements the standard Read interface. It will download the requested @@ -55,9 +56,7 @@ func (r *Renter) Streamer(siaPath string) (string, io.ReadSeeker, error) { // only request a single chunk at once. func (s *streamer) Read(p []byte) (n int, err error) { // Get the file's size - s.file.mu.RLock() - fileSize := int64(s.file.size) - s.file.mu.RUnlock() + fileSize := int64(s.file.Size()) // Make sure we haven't reached the EOF yet. if s.offset >= fileSize { @@ -65,7 +64,7 @@ func (s *streamer) Read(p []byte) (n int, err error) { } // Calculate how much we can download. We never download more than a single chunk. 
- chunkSize := s.file.staticChunkSize() + chunkSize := s.file.ChunkSize() remainingData := uint64(fileSize - s.offset) requestedData := uint64(len(p)) remainingChunk := chunkSize - uint64(s.offset)%chunkSize @@ -127,9 +126,7 @@ func (s *streamer) Seek(offset int64, whence int) (int64, error) { case io.SeekCurrent: newOffset = s.offset case io.SeekEnd: - s.file.mu.RLock() - newOffset = int64(s.file.size) - s.file.mu.RUnlock() + newOffset = int64(s.file.Size()) } newOffset += offset diff --git a/modules/renter/files.go b/modules/renter/files.go index 20edbe10fe..de57fdf72b 100644 --- a/modules/renter/files.go +++ b/modules/renter/files.go @@ -273,58 +273,59 @@ func (r *Renter) DeleteFile(nickname string) error { // FileList returns all of the files that the renter has. func (r *Renter) FileList() []modules.FileInfo { - // Get all the files and their contracts - var files []*file - contractIDs := make(map[types.FileContractID]struct{}) + // Get all the files holding the readlock. lockID := r.mu.RLock() + files := make([]*siafile.SiaFile, 0, len(r.files)) + for _, file := range r.files { + files = append(files, file) + } + r.mu.RUnlock(lockID) + + // Save host keys in map. We can't do that under the same lock since we + // need to call a public method on the file. + pks := make(map[string]types.SiaPublicKey) for _, f := range r.files { - files = append(files, f) - f.mu.RLock() - for cid := range f.contracts { - contractIDs[cid] = struct{}{} + for _, pk := range f.HostPublicKeys() { + pks[string(pk.Key)] = pk } - f.mu.RUnlock() } - r.mu.RUnlock(lockID) // Build 2 maps that map every contract id to its offline and goodForRenew // status. 
- goodForRenew := make(map[types.FileContractID]bool) - offline := make(map[types.FileContractID]bool) - for cid := range contractIDs { - resolvedKey := r.hostContractor.ResolveIDToPubKey(cid) - cu, ok := r.hostContractor.ContractUtility(resolvedKey) + goodForRenew := make(map[string]bool) + offline := make(map[string]bool) + for _, pk := range pks { + cu, ok := r.hostContractor.ContractUtility(pk) if !ok { continue } - goodForRenew[cid] = ok && cu.GoodForRenew - offline[cid] = r.hostContractor.IsOffline(resolvedKey) + goodForRenew[string(pk.Key)] = ok && cu.GoodForRenew + offline[string(pk.Key)] = r.hostContractor.IsOffline(pk) } // Build the list of FileInfos. fileList := []modules.FileInfo{} for _, f := range files { - lockID := r.mu.RLock() - f.mu.RLock() - renewing := true var localPath string - tf, exists := r.persist.Tracking[f.name] + siaPath := f.SiaPath() + lockID := r.mu.RLock() + tf, exists := r.persist.Tracking[siaPath] + r.mu.RUnlock(lockID) if exists { localPath = tf.RepairPath } + fileList = append(fileList, modules.FileInfo{ - SiaPath: f.name, + SiaPath: f.SiaPath(), LocalPath: localPath, - Filesize: f.size, - Renewing: renewing, - Available: f.available(offline), - Redundancy: f.redundancy(offline, goodForRenew), - UploadedBytes: f.uploadedBytes(), - UploadProgress: f.uploadProgress(), - Expiration: f.expiration(), + Filesize: f.Size(), + Renewing: true, + Available: f.Available(offline), + Redundancy: f.Redundancy(offline, goodForRenew), + UploadedBytes: f.UploadedBytes(), + UploadProgress: f.UploadProgress(), + Expiration: f.Expiration(), }) - f.mu.RUnlock() - r.mu.RUnlock(lockID) } return fileList } @@ -345,21 +346,21 @@ func (r *Renter) File(siaPath string) (modules.FileInfo, error) { // Build 2 maps that map every contract id to its offline and goodForRenew // status. 
- goodForRenew := make(map[types.FileContractID]bool) - offline := make(map[types.FileContractID]bool) - for pk := range pks { + goodForRenew := make(map[string]bool) + offline := make(map[string]bool) + for _, pk := range pks { cu, ok := r.hostContractor.ContractUtility(pk) if !ok { continue } - goodForRenew[cid] = ok && cu.GoodForRenew - offline[cid] = r.hostContractor.IsOffline(pk) + goodForRenew[string(pk.Key)] = ok && cu.GoodForRenew + offline[string(pk.Key)] = r.hostContractor.IsOffline(pk) } // Build the FileInfo renewing := true var localPath string - tf, exists := r.persist.Tracking[file.Name()] + tf, exists := r.persist.Tracking[file.SiaPath()] if exists { localPath = tf.RepairPath } @@ -401,8 +402,8 @@ func (r *Renter) RenameFile(currentName, newName string) error { } // Modify the file and save it to disk. - file.Rename(newName) - err = r.saveFile(siaFileToFile(file)) + file.Rename(newName) // TODO: violation of locking convention + err = r.saveFile(file) if err != nil { return err } diff --git a/modules/renter/files_test.go b/modules/renter/files_test.go index 5c55f2e091..cca3c506df 100644 --- a/modules/renter/files_test.go +++ b/modules/renter/files_test.go @@ -264,9 +264,7 @@ func TestRenterFileListLocalPath(t *testing.T) { defer rt.Close() id := rt.renter.mu.Lock() f := newTestingFile() - f.name = "testname" - rt.renter.files["test"] = f - rt.renter.persist.Tracking[f.name] = trackedFile{ + rt.renter.persist.Tracking[f.SiaPath()] = trackedFile{ RepairPath: "TestPath", } rt.renter.mu.Unlock(id) @@ -297,16 +295,15 @@ func TestRenterDeleteFile(t *testing.T) { } // Put a file in the renter. - rt.renter.files["1"] = &file{ - name: "one", - } + file1 := newTestingFile() + rt.renter.files["1"] = file1 // Delete a different file. err = rt.renter.DeleteFile("one") if err != ErrUnknownPath { t.Error("Expected ErrUnknownPath, got", err) } // Delete the file. 
- err = rt.renter.DeleteFile("1") + err = rt.renter.DeleteFile(file1.SiaPath()) if err != nil { t.Error(err) } @@ -316,9 +313,9 @@ func TestRenterDeleteFile(t *testing.T) { // Put a file in the renter, then rename it. f := newTestingFile() - f.name = "1" - rt.renter.files[f.name] = f - rt.renter.RenameFile(f.name, "one") + f.Rename("1") // set name to "1" + rt.renter.files[f.SiaPath()] = f + rt.renter.RenameFile(f.SiaPath(), "one") // Call delete on the previous name. err = rt.renter.DeleteFile("1") if err != ErrUnknownPath { @@ -363,12 +360,8 @@ func TestRenterFileList(t *testing.T) { } // Put a file in the renter. - rsc, _ := NewRSCode(1, 1) - rt.renter.files["1"] = &file{ - name: "one", - erasureCode: rsc, - pieceSize: 1, - } + file1 := newTestingFile() + rt.renter.files["1"] = file1 if len(rt.renter.FileList()) != 1 { t.Error("FileList is not returning the only file in the renter") } @@ -377,17 +370,14 @@ func TestRenterFileList(t *testing.T) { } // Put multiple files in the renter. - rt.renter.files["2"] = &file{ - name: "two", - erasureCode: rsc, - pieceSize: 1, - } + file2 := newTestingFile() + rt.renter.files["2"] = file2 if len(rt.renter.FileList()) != 2 { t.Error("FileList is not returning both files in the renter") } files := rt.renter.FileList() - if !((files[0].SiaPath == "one" || files[0].SiaPath == "two") && - (files[1].SiaPath == "one" || files[1].SiaPath == "two") && + if !((files[0].SiaPath == file1.SiaPath() || files[0].SiaPath == file2.SiaPath()) && + (files[1].SiaPath == file1.SiaPath() || files[1].SiaPath == file2.SiaPath()) && (files[0].SiaPath != files[1].SiaPath)) { t.Error("FileList is returning wrong names for the files:", files[0].SiaPath, files[1].SiaPath) } @@ -412,7 +402,7 @@ func TestRenterRenameFile(t *testing.T) { // Rename a file that does exist. 
f := newTestingFile() - f.name = "1" + f.Rename("1") rt.renter.files["1"] = f err = rt.renter.RenameFile("1", "1a") if err != nil { @@ -428,7 +418,7 @@ func TestRenterRenameFile(t *testing.T) { // Rename a file to an existing name. f2 := newTestingFile() - f2.name = "1" + f2.Rename("1") rt.renter.files["1"] = f2 err = rt.renter.RenameFile("1", "1a") if err != ErrPathOverload { diff --git a/modules/renter/persist.go b/modules/renter/persist.go index 2d1ab6e8e4..d9682cebac 100644 --- a/modules/renter/persist.go +++ b/modules/renter/persist.go @@ -12,6 +12,7 @@ import ( "github.com/NebulousLabs/Sia/encoding" "github.com/NebulousLabs/Sia/modules" + "github.com/NebulousLabs/Sia/modules/renter/siafile" "github.com/NebulousLabs/Sia/persist" ) @@ -57,26 +58,26 @@ type ( ) // saveFile saves a file to the renter directory. -func (r *Renter) saveFile(f *file) error { - if f.deleted { +func (r *Renter) saveFile(f *siafile.SiaFile) error { + if f.Deleted() { // TODO: violation of locking convention return errors.New("can't save deleted file") } // Create directory structure specified in nickname. - fullPath := filepath.Join(r.persistDir, f.name+ShareExtension) + fullPath := filepath.Join(r.persistDir, f.SiaPath()+ShareExtension) err := os.MkdirAll(filepath.Dir(fullPath), 0700) if err != nil { return err } // Open SafeFile handle. - handle, err := persist.NewSafeFile(filepath.Join(r.persistDir, f.name+ShareExtension)) + handle, err := persist.NewSafeFile(filepath.Join(r.persistDir, f.SiaPath()+ShareExtension)) if err != nil { return err } defer handle.Close() // Write file data. - err = shareFiles([]*file{f}, handle) + err = shareFiles([]*siafile.SiaFile{f}, handle) if err != nil { return err } @@ -161,7 +162,12 @@ func (r *Renter) loadSettings() error { // shareFiles writes the specified files to w. First a header is written, // followed by the gzipped concatenation of each file. 
-func shareFiles(files []*file, w io.Writer) error { +func shareFiles(siaFiles []*siafile.SiaFile, w io.Writer) error { + // Convert files to old type. + files := make([]*file, 0, len(siaFiles)) + for _, sf := range siaFiles { + files = append(files, siaFileToFile(sf)) + } // Write header. err := encoding.NewEncoder(w).EncodeAll( shareHeader, @@ -204,7 +210,7 @@ func (r *Renter) ShareFiles(nicknames []string, shareDest string) error { defer handle.Close() // Load files from renter. - files := make([]*file, len(nicknames)) + files := make([]*siafile.SiaFile, len(nicknames)) for i, name := range nicknames { f, exists := r.files[name] if !exists { @@ -228,7 +234,7 @@ func (r *Renter) ShareFilesASCII(nicknames []string) (string, error) { defer r.mu.RUnlock(lockID) // Load files from renter. - files := make([]*file, len(nicknames)) + files := make([]*siafile.SiaFile, len(nicknames)) for i, name := range nicknames { f, exists := r.files[name] if !exists { @@ -298,12 +304,12 @@ func (r *Renter) loadSharedFiles(reader io.Reader) ([]string, error) { // Add files to renter. names := make([]string, numFiles) for i, f := range files { - r.files[f.name] = f + r.files[f.name] = fileToSiaFile(f) names[i] = f.name } // Save the files. for _, f := range files { - r.saveFile(f) + r.saveFile(fileToSiaFile(f)) } return names, nil diff --git a/modules/renter/persist_test.go b/modules/renter/persist_test.go index 26fb214c7f..4fd49bb673 100644 --- a/modules/renter/persist_test.go +++ b/modules/renter/persist_test.go @@ -9,46 +9,41 @@ import ( "github.com/NebulousLabs/Sia/build" "github.com/NebulousLabs/Sia/crypto" - "github.com/NebulousLabs/Sia/encoding" "github.com/NebulousLabs/Sia/modules" - "github.com/NebulousLabs/Sia/persist" + "github.com/NebulousLabs/Sia/modules/renter/siafile" "github.com/NebulousLabs/fastrand" ) // newTestingFile initializes a file object with random parameters. 
-func newTestingFile() *file { +func newTestingFile() *siafile.SiaFile { data := fastrand.Bytes(8) nData := fastrand.Intn(10) nParity := fastrand.Intn(10) rsc, _ := NewRSCode(nData+1, nParity+1) - return &file{ - name: "testfile-" + strconv.Itoa(int(data[0])), - size: encoding.DecUint64(data[1:5]), - masterKey: crypto.GenerateTwofishKey(), - erasureCode: rsc, - pieceSize: encoding.DecUint64(data[6:8]), - staticUID: persist.RandomSuffix(), - } + name := "testfile-" + strconv.Itoa(int(data[0])) + masterKey := crypto.GenerateTwofishKey() + + return siafile.New(name, rsc, masterKey) } // equalFiles is a helper function that compares two files for equality. -func equalFiles(f1, f2 *file) error { +func equalFiles(f1, f2 *siafile.SiaFile) error { if f1 == nil || f2 == nil { return fmt.Errorf("one or both files are nil") } - if f1.name != f2.name { - return fmt.Errorf("names do not match: %v %v", f1.name, f2.name) + if f1.SiaPath() != f2.SiaPath() { + return fmt.Errorf("names do not match: %v %v", f1.SiaPath(), f2.SiaPath()) } - if f1.size != f2.size { - return fmt.Errorf("sizes do not match: %v %v", f1.size, f2.size) + if f1.Size() != f2.Size() { + return fmt.Errorf("sizes do not match: %v %v", f1.Size(), f2.Size()) } - if f1.masterKey != f2.masterKey { - return fmt.Errorf("keys do not match: %v %v", f1.masterKey, f2.masterKey) + if f1.MasterKey() != f2.MasterKey() { + return fmt.Errorf("keys do not match: %v %v", f1.MasterKey(), f2.MasterKey()) } - if f1.pieceSize != f2.pieceSize { - return fmt.Errorf("pieceSizes do not match: %v %v", f1.pieceSize, f2.pieceSize) + if f1.PieceSize() != f2.PieceSize() { + return fmt.Errorf("pieceSizes do not match: %v %v", f1.PieceSize(), f2.PieceSize()) } return nil } @@ -67,57 +62,57 @@ func TestFileShareLoad(t *testing.T) { // Create a file and add it to the renter. 
savedFile := newTestingFile() id := rt.renter.mu.Lock() - rt.renter.files[savedFile.name] = savedFile + rt.renter.files[savedFile.SiaPath()] = savedFile rt.renter.mu.Unlock(id) // Share .sia file to disk. path := filepath.Join(build.SiaTestingDir, "renter", t.Name(), "test.sia") - err = rt.renter.ShareFiles([]string{savedFile.name}, path) + err = rt.renter.ShareFiles([]string{savedFile.SiaPath()}, path) if err != nil { t.Fatal(err) } // Remove the file from the renter. - delete(rt.renter.files, savedFile.name) + delete(rt.renter.files, savedFile.SiaPath()) // Load the .sia file back into the renter. names, err := rt.renter.LoadSharedFiles(path) if err != nil { t.Fatal(err) } - if len(names) != 1 || names[0] != savedFile.name { + if len(names) != 1 || names[0] != savedFile.SiaPath() { t.Fatal("nickname not loaded properly:", names) } - err = equalFiles(rt.renter.files[savedFile.name], savedFile) + err = equalFiles(rt.renter.files[savedFile.SiaPath()], savedFile) if err != nil { t.Fatal(err) } // Share and load multiple files. savedFile2 := newTestingFile() - rt.renter.files[savedFile2.name] = savedFile2 + rt.renter.files[savedFile2.SiaPath()] = savedFile2 path = filepath.Join(build.SiaTestingDir, "renter", t.Name(), "test2.sia") - err = rt.renter.ShareFiles([]string{savedFile.name, savedFile2.name}, path) + err = rt.renter.ShareFiles([]string{savedFile.SiaPath(), savedFile2.SiaPath()}, path) if err != nil { t.Fatal(err) } // Remove the files from the renter. 
- delete(rt.renter.files, savedFile.name) - delete(rt.renter.files, savedFile2.name) + delete(rt.renter.files, savedFile.SiaPath()) + delete(rt.renter.files, savedFile2.SiaPath()) names, err = rt.renter.LoadSharedFiles(path) if err != nil { t.Fatal(nil) } - if len(names) != 2 || (names[0] != savedFile2.name && names[1] != savedFile2.name) { + if len(names) != 2 || (names[0] != savedFile2.SiaPath() && names[1] != savedFile2.SiaPath()) { t.Fatal("nicknames not loaded properly:", names) } - err = equalFiles(rt.renter.files[savedFile.name], savedFile) + err = equalFiles(rt.renter.files[savedFile.SiaPath()], savedFile) if err != nil { t.Fatal(err) } - err = equalFiles(rt.renter.files[savedFile2.name], savedFile2) + err = equalFiles(rt.renter.files[savedFile2.SiaPath()], savedFile2) if err != nil { t.Fatal(err) } @@ -137,26 +132,26 @@ func TestFileShareLoadASCII(t *testing.T) { // Create a file and add it to the renter. savedFile := newTestingFile() id := rt.renter.mu.Lock() - rt.renter.files[savedFile.name] = savedFile + rt.renter.files[savedFile.SiaPath()] = savedFile rt.renter.mu.Unlock(id) - ascii, err := rt.renter.ShareFilesASCII([]string{savedFile.name}) + ascii, err := rt.renter.ShareFilesASCII([]string{savedFile.SiaPath()}) if err != nil { t.Fatal(err) } // Remove the file from the renter. 
- delete(rt.renter.files, savedFile.name) + delete(rt.renter.files, savedFile.SiaPath()) names, err := rt.renter.LoadSharedFilesASCII(ascii) if err != nil { t.Fatal(err) } - if len(names) != 1 || names[0] != savedFile.name { + if len(names) != 1 || names[0] != savedFile.SiaPath() { t.Fatal("nickname not loaded properly") } - err = equalFiles(rt.renter.files[savedFile.name], savedFile) + err = equalFiles(rt.renter.files[savedFile.SiaPath()], savedFile) if err != nil { t.Fatal(err) } @@ -186,15 +181,15 @@ func TestRenterSaveLoad(t *testing.T) { } // Create and save some files - var f1, f2, f3 *file + var f1, f2, f3 *siafile.SiaFile f1 = newTestingFile() f2 = newTestingFile() f3 = newTestingFile() // names must not conflict - for f2.name == f1.name || f2.name == f3.name { + for f2.SiaPath() == f1.SiaPath() || f2.SiaPath() == f3.SiaPath() { f2 = newTestingFile() } - for f3.name == f1.name || f3.name == f2.name { + for f3.SiaPath() == f1.SiaPath() || f3.SiaPath() == f2.SiaPath() { f3 = newTestingFile() } rt.renter.saveFile(f1) @@ -226,13 +221,13 @@ func TestRenterSaveLoad(t *testing.T) { t.Fatal(err) } - if err := equalFiles(f1, rt.renter.files[f1.name]); err != nil { + if err := equalFiles(f1, rt.renter.files[f1.SiaPath()]); err != nil { t.Fatal(err) } - if err := equalFiles(f2, rt.renter.files[f2.name]); err != nil { + if err := equalFiles(f2, rt.renter.files[f2.SiaPath()]); err != nil { t.Fatal(err) } - if err := equalFiles(f3, rt.renter.files[f3.name]); err != nil { + if err := equalFiles(f3, rt.renter.files[f3.SiaPath()]); err != nil { t.Fatal(err) } @@ -266,11 +261,11 @@ func TestRenterPaths(t *testing.T) { // foo/bar.sia // foo/bar/baz.sia f1 := newTestingFile() - f1.name = "foo" + f1.Rename("foo") f2 := newTestingFile() - f2.name = "foo/bar" + f2.Rename("foo/bar") f3 := newTestingFile() - f3.name = "foo/bar/baz" + f3.Rename("foo/bar/baz") rt.renter.saveFile(f1) rt.renter.saveFile(f2) rt.renter.saveFile(f3) @@ -286,13 +281,13 @@ func TestRenterPaths(t *testing.T) 
{ } // Check that the files were loaded properly. - if err := equalFiles(f1, rt.renter.files[f1.name]); err != nil { + if err := equalFiles(f1, rt.renter.files[f1.SiaPath()]); err != nil { t.Fatal(err) } - if err := equalFiles(f2, rt.renter.files[f2.name]); err != nil { + if err := equalFiles(f2, rt.renter.files[f2.SiaPath()]); err != nil { t.Fatal(err) } - if err := equalFiles(f3, rt.renter.files[f3.name]); err != nil { + if err := equalFiles(f3, rt.renter.files[f3.SiaPath()]); err != nil { t.Fatal(err) } @@ -310,7 +305,7 @@ func TestRenterPaths(t *testing.T) { return nil }) // walk will descend into foo/bar/, reading baz, bar, and finally foo - expWalkStr := (f3.name + ".sia") + (f2.name + ".sia") + (f1.name + ".sia") + expWalkStr := (f3.SiaPath() + ".sia") + (f2.SiaPath() + ".sia") + (f1.SiaPath() + ".sia") if filepath.ToSlash(walkStr) != expWalkStr { t.Fatalf("Bad walk string: expected %v, got %v", expWalkStr, walkStr) } diff --git a/modules/renter/renter.go b/modules/renter/renter.go index 036b428972..d2c15234d4 100644 --- a/modules/renter/renter.go +++ b/modules/renter/renter.go @@ -476,7 +476,7 @@ func NewCustomRenter(g modules.Gateway, cs modules.ConsensusSet, tpool modules.T } r := &Renter{ - files: make(map[string]*file), + files: make(map[string]*siafile.SiaFile), // Making newDownloads a buffered channel means that most of the time, a // new download will trigger an unnecessary extra iteration of the diff --git a/modules/renter/siafile/metadata.go b/modules/renter/siafile/metadata.go index 64a1a5de6c..03a7a444bd 100644 --- a/modules/renter/siafile/metadata.go +++ b/modules/renter/siafile/metadata.go @@ -11,11 +11,12 @@ import ( type ( // Metadata is the metadata of a SiaFile and is JSON encoded. 
Metadata struct { - version [16]byte // version of the sia file format used - staticFileSize int64 // total size of the file - masterKey crypto.TwofishKey // masterkey used to encrypt pieces - trackingPath string // file to the local copy of the file used for repairing - siaPath string + version [16]byte // version of the sia file format used + fileSize int64 // total size of the file + masterKey crypto.TwofishKey // masterkey used to encrypt pieces + pieceSize uint64 // size of a single piece of the file + trackingPath string // file to the local copy of the file used for repairing + siaPath string // The following fields are the usual unix timestamps of files. modTime time.Time // time of last content modification @@ -50,10 +51,17 @@ type ( ) // Available indicates whether the file is ready to be downloaded. -func (sf *SiaFile) Available(offline map[types.FileContractID]bool) bool { +func (sf *SiaFile) Available(offline map[string]bool) bool { panic("not implemented yet") } +// ChunkSize returns the size of a single chunk of the file. +func (sf *SiaFile) ChunkSize() uint64 { + sf.mu.RLock() + defer sf.mu.RUnlock() + return sf.chunkSize() +} + // Delete removes the file from disk and marks it as deleted. Once the file is // deleted, certain methods should return an error. func (sf *SiaFile) Delete() error { @@ -77,16 +85,30 @@ func (sf *SiaFile) HostPublicKeys() []types.SiaPublicKey { panic("not implemented yet") } +// MasterKey returns the masterkey used to encrypt the file. +func (sf *SiaFile) MasterKey() crypto.TwofishKey { + sf.mu.RLock() + sf.mu.RUnlock() + return sf.metadata.masterKey +} + // Mode returns the FileMode of the SiaFile. func (sf *SiaFile) Mode() os.FileMode { panic("not implemented yet") } +// PieceSize returns the size of a single piece of the file. +func (sf *SiaFile) PieceSize() uint64 { + sf.mu.RLock() + defer sf.mu.RUnlock() + return sf.metadata.pieceSize +} + // Redundancy returns the redundancy of the least redundant chunk. 
A file // becomes available when this redundancy is >= 1. Assumes that every piece is // unique within a file contract. -1 is returned if the file has size 0. It // takes one argument, a map of offline contracts for this file. -func (sf *SiaFile) Redundancy(offlineMap map[types.FileContractID]bool, goodForRenewMap map[types.FileContractID]bool) float64 { +func (sf *SiaFile) Redundancy(offlineMap map[string]bool, goodForRenewMap map[string]bool) float64 { panic("not implemented yet") } @@ -95,6 +117,13 @@ func (sf *SiaFile) Rename(newName string) string { panic("not implemented yet") } +// SetMode sets the filemode of the sia file. +func (sf *SiaFile) SetMode(mode os.FileMode) { + sf.mu.Lock() + defer sf.mu.Unlock() + sf.metadata.mode = mode +} + // SiaPath returns the file's sia path. func (sf *SiaFile) SiaPath() string { panic("not implemented yet") @@ -118,3 +147,8 @@ func (sf *SiaFile) UploadedBytes() uint64 { func (sf *SiaFile) UploadProgress() float64 { panic("not implemented yet") } + +// ChunkSize returns the size of a single chunk of the file. +func (sf *SiaFile) chunkSize() uint64 { + return sf.metadata.pieceSize * uint64(sf.erasureCode.MinPieces()) +} diff --git a/modules/renter/siafile/siafile.go b/modules/renter/siafile/siafile.go index 04947f3e96..bf054606cb 100644 --- a/modules/renter/siafile/siafile.go +++ b/modules/renter/siafile/siafile.go @@ -1,10 +1,12 @@ package siafile import ( + "encoding/base32" "sync" "github.com/NebulousLabs/Sia/modules" "github.com/NebulousLabs/Sia/types" + "github.com/NebulousLabs/fastrand" "github.com/NebulousLabs/Sia/crypto" ) @@ -31,7 +33,7 @@ type ( // utility fields. These are not persisted. 
erasureCode modules.ErasureCoder - mu sync.Mutex + mu sync.RWMutex uid string } @@ -63,8 +65,69 @@ type ( // Piece represents a single piece of a chunk on disk Piece struct { - keyNonce [4]byte // nonce used for encrypting the piece - pubKeyOff uint16 // offset in the pubKeyTable - root crypto.Hash // merkle root of the piece + KeyNonce [4]byte // nonce used for encrypting the piece + HostPubKey types.SiaPublicKey // public key of the host + MerkleRoot crypto.Hash // merkle root of the piece } ) + +// New create a new SiaFile. +func New(siaPath string, erasureCode modules.ErasureCoder, masterKey crypto.TwofishKey) *SiaFile { + file := &SiaFile{ + metadata: Metadata{ + masterKey: masterKey, + pieceSize: modules.SectorSize - crypto.TwofishOverhead, + siaPath: siaPath, + }, + erasureCode: erasureCode, + uid: base32.StdEncoding.EncodeToString(fastrand.Bytes(20))[:20], + } + return file +} + +// AddPiece adds an uploaded piece to the file. It also updates the host table +// if the public key of the host is not aleady known. +func (sf *SiaFile) AddPiece(pk types.SiaPublicKey, chunkIndex, pieceIndex uint64, merkleRoot crypto.Hash) error { + panic("Not implemented yet") +} + +// ErasureCode returns the erasure coder used by the file. +func (sf *SiaFile) ErasureCode() modules.ErasureCoder { + sf.mu.RLock() + sf.mu.RUnlock() + return sf.erasureCode +} + +// NumChunks returns the number of chunks the file consists of. +func (sf *SiaFile) NumChunks() uint64 { + // empty files still need at least one chunk + if sf.metadata.fileSize == 0 { + return 1 + } + n := uint64(sf.metadata.fileSize) / sf.chunkSize() + // last chunk will be padded, unless chunkSize divides file evenly. + if uint64(sf.metadata.fileSize)%sf.chunkSize() != 0 { + n++ + } + return n +} + +// NumPieces returns the number of pieces each chunk in the file consists of. 
+func (sf *SiaFile) NumPieces() uint64 { + sf.mu.RLock() + defer sf.mu.RUnlock() + return uint64(sf.erasureCode.NumPieces()) +} + +// Piece returns the piece the index pieceIndex from within the chunk at the +// index chunkIndex. +func (sf *SiaFile) Piece(chunkIndex, pieceIndex uint64) (Piece, error) { + // TODO should return a deep copy to make sure that the caller can't modify + // the chunks without holding a lock. + panic("Not implemented yet") +} + +// UID returns a unique identifier for this file. +func (sf *SiaFile) UID() string { + panic("Not implemented yet") +} diff --git a/modules/renter/upload.go b/modules/renter/upload.go index e8040ff919..62b8fc1f24 100644 --- a/modules/renter/upload.go +++ b/modules/renter/upload.go @@ -81,8 +81,8 @@ func (r *Renter) Upload(up modules.FileUploadParams) error { } // Create file object. - f := newFile(up.SiaPath, up.ErasureCode, pieceSize, uint64(fileInfo.Size())) - f.mode = uint32(fileInfo.Mode()) + f := fileToSiaFile(newFile(up.SiaPath, up.ErasureCode, pieceSize, uint64(fileInfo.Size()))) + f.SetMode(fileInfo.Mode()) // Add file to renter. lockID = r.mu.Lock() diff --git a/modules/renter/uploadchunk.go b/modules/renter/uploadchunk.go index ae12331444..5daed93ad4 100644 --- a/modules/renter/uploadchunk.go +++ b/modules/renter/uploadchunk.go @@ -6,6 +6,7 @@ import ( "sync" "github.com/NebulousLabs/Sia/crypto" + "github.com/NebulousLabs/Sia/modules/renter/siafile" "github.com/NebulousLabs/errors" ) @@ -23,7 +24,7 @@ type unfinishedUploadChunk struct { // is known not to exist locally. id uploadChunkID localPath string - renterFile *file + renterFile *siafile.SiaFile // Information about the chunk, namely where it exists within the file. // @@ -129,8 +130,8 @@ func (r *Renter) managedDownloadLogicalChunkData(chunk *unfinishedUploadChunk) e // TODO: There is a disparity in the way that the upload and download code // handle the last chunk, which may not be full sized. 
downloadLength := chunk.length - if chunk.index == chunk.renterFile.numChunks()-1 && chunk.renterFile.size%chunk.length != 0 { - downloadLength = chunk.renterFile.size % chunk.length + if chunk.index == chunk.renterFile.NumChunks()-1 && chunk.renterFile.Size()%chunk.length != 0 { + downloadLength = chunk.renterFile.Size() % chunk.length } // Create the download. @@ -176,7 +177,7 @@ func (r *Renter) managedDownloadLogicalChunkData(chunk *unfinishedUploadChunk) e func (r *Renter) managedFetchAndRepairChunk(chunk *unfinishedUploadChunk) { // Calculate the amount of memory needed for erasure coding. This will need // to be released if there's an error before erasure coding is complete. - erasureCodingMemory := chunk.renterFile.pieceSize * uint64(chunk.renterFile.erasureCode.MinPieces()) + erasureCodingMemory := chunk.renterFile.PieceSize() * uint64(chunk.renterFile.ErasureCode().MinPieces()) // Calculate the amount of memory to release due to already completed // pieces. This memory gets released during encryption, but needs to be @@ -184,7 +185,7 @@ func (r *Renter) managedFetchAndRepairChunk(chunk *unfinishedUploadChunk) { var pieceCompletedMemory uint64 for i := 0; i < len(chunk.pieceUsage); i++ { if chunk.pieceUsage[i] { - pieceCompletedMemory += chunk.renterFile.pieceSize + crypto.TwofishOverhead + pieceCompletedMemory += chunk.renterFile.PieceSize() + crypto.TwofishOverhead } } @@ -221,7 +222,7 @@ func (r *Renter) managedFetchAndRepairChunk(chunk *unfinishedUploadChunk) { // fact to reduce the total memory required to create the physical data. // That will also change the amount of memory we need to allocate, and the // number of times we need to return memory. 
- chunk.physicalChunkData, err = chunk.renterFile.erasureCode.EncodeShards(chunk.logicalChunkData) + chunk.physicalChunkData, err = chunk.renterFile.ErasureCode().EncodeShards(chunk.logicalChunkData) chunk.logicalChunkData = nil r.memoryManager.Return(erasureCodingMemory) chunk.memoryReleased += erasureCodingMemory @@ -251,7 +252,7 @@ func (r *Renter) managedFetchAndRepairChunk(chunk *unfinishedUploadChunk) { chunk.physicalChunkData[i] = nil } else { // Encrypt the piece. - key := deriveKey(chunk.renterFile.masterKey, chunk.index, uint64(i)) + key := deriveKey(chunk.renterFile.MasterKey(), chunk.index, uint64(i)) chunk.physicalChunkData[i] = key.EncryptBytes(chunk.physicalChunkData[i]) } } @@ -320,6 +321,7 @@ func (r *Renter) managedFetchLogicalChunkData(chunk *unfinishedUploadChunk) erro // cleanup required. This can include returning rememory and releasing the chunk // from the map of active chunks in the chunk heap. func (r *Renter) managedCleanUpUploadChunk(uc *unfinishedUploadChunk) { + pieceSize := uc.renterFile.PieceSize() uc.mu.Lock() piecesAvailable := 0 var memoryReleased uint64 @@ -336,7 +338,7 @@ func (r *Renter) managedCleanUpUploadChunk(uc *unfinishedUploadChunk) { // will prefer releasing later pieces, which improves computational // complexity for erasure coding. if piecesAvailable >= uc.workersRemaining { - memoryReleased += uc.renterFile.pieceSize + crypto.TwofishOverhead + memoryReleased += pieceSize + crypto.TwofishOverhead uc.physicalChunkData[i] = nil // Mark this piece as taken so that we don't double release memory. 
uc.pieceUsage[i] = true diff --git a/modules/renter/uploadheap.go b/modules/renter/uploadheap.go index 920049bb80..1c1f29b8e0 100644 --- a/modules/renter/uploadheap.go +++ b/modules/renter/uploadheap.go @@ -24,6 +24,7 @@ import ( "github.com/NebulousLabs/Sia/build" "github.com/NebulousLabs/Sia/crypto" + "github.com/NebulousLabs/Sia/modules/renter/siafile" "github.com/NebulousLabs/Sia/types" ) @@ -68,11 +69,11 @@ func (uch *uploadChunkHeap) Pop() interface{} { func (uh *uploadHeap) managedPush(uuc *unfinishedUploadChunk) { // Create the unique chunk id. ucid := uploadChunkID{ - fileUID: uuc.renterFile.staticUID, + fileUID: uuc.renterFile.UID(), index: uuc.index, } // Sanity check: fileUID should not be the empty value. - if uuc.renterFile.staticUID == "" { + if uuc.renterFile.UID() == "" { panic("empty string for file UID") } @@ -102,19 +103,15 @@ func (uh *uploadHeap) managedPop() (uc *unfinishedUploadChunk) { // TODO / NOTE: This code can be substantially simplified once the files store // the HostPubKey instead of the FileContractID, and can be simplified even // further once the layout is per-chunk instead of per-filecontract. -func (r *Renter) buildUnfinishedChunks(f *file, hosts map[string]struct{}) []*unfinishedUploadChunk { - // Files are not threadsafe. - f.mu.Lock() - defer f.mu.Unlock() - +func (r *Renter) buildUnfinishedChunks(f *siafile.SiaFile, hosts map[string]struct{}) []*unfinishedUploadChunk { // If the file is not being tracked, don't repair it. - trackedFile, exists := r.persist.Tracking[f.name] + trackedFile, exists := r.persist.Tracking[f.SiaPath()] if !exists { return nil } // If we don't have enough workers for the file, don't repair it right now. 
- if len(r.workerPool) < f.erasureCode.MinPieces() { + if len(r.workerPool) < f.ErasureCode().MinPieces() { return nil } @@ -123,7 +120,7 @@ func (r *Renter) buildUnfinishedChunks(f *file, hosts map[string]struct{}) []*un // TODO / NOTE: Future files may have a different method for determining the // number of chunks. Changes will be made due to things like sparse files, // and the fact that chunks are going to be different sizes. - chunkCount := f.numChunks() + chunkCount := f.NumChunks() newUnfinishedChunks := make([]*unfinishedUploadChunk, chunkCount) for i := uint64(0); i < chunkCount; i++ { newUnfinishedChunks[i] = &unfinishedUploadChunk{ @@ -131,13 +128,13 @@ func (r *Renter) buildUnfinishedChunks(f *file, hosts map[string]struct{}) []*un localPath: trackedFile.RepairPath, id: uploadChunkID{ - fileUID: f.staticUID, + fileUID: f.UID(), index: i, }, index: i, - length: f.staticChunkSize(), - offset: int64(i * f.staticChunkSize()), + length: f.ChunkSize(), + offset: int64(i * f.ChunkSize()), // memoryNeeded has to also include the logical data, and also // include the overhead for encryption. @@ -148,13 +145,13 @@ func (r *Renter) buildUnfinishedChunks(f *file, hosts map[string]struct{}) []*un // TODO: Currently we request memory for all of the pieces as well // as the minimum pieces, but we perhaps don't need to request all // of that. 
- memoryNeeded: f.pieceSize*uint64(f.erasureCode.NumPieces()+f.erasureCode.MinPieces()) + uint64(f.erasureCode.NumPieces()*crypto.TwofishOverhead), - minimumPieces: f.erasureCode.MinPieces(), - piecesNeeded: f.erasureCode.NumPieces(), + memoryNeeded: f.PieceSize()*uint64(f.ErasureCode().NumPieces()+f.ErasureCode().MinPieces()) + uint64(f.ErasureCode().NumPieces()*crypto.TwofishOverhead), + minimumPieces: f.ErasureCode().MinPieces(), + piecesNeeded: f.ErasureCode().NumPieces(), - physicalChunkData: make([][]byte, f.erasureCode.NumPieces()), + physicalChunkData: make([][]byte, f.ErasureCode().NumPieces()), - pieceUsage: make([]bool, f.erasureCode.NumPieces()), + pieceUsage: make([]bool, f.ErasureCode().NumPieces()), unusedHosts: make(map[string]struct{}), } // Every chunk can have a different set of unused hosts. @@ -163,59 +160,59 @@ func (r *Renter) buildUnfinishedChunks(f *file, hosts map[string]struct{}) []*un } } - // Iterate through the contracts of the file and mark which hosts are - // already in use for the chunk. As you delete hosts from the 'unusedHosts' - // map, also increment the 'piecesCompleted' value. - saveFile := false - for fcid, fileContract := range f.contracts { - pk := r.hostContractor.ResolveIDToPubKey(fcid) - recentContract, exists := r.hostContractor.ContractByPublicKey(pk) - contractUtility, exists2 := r.hostContractor.ContractUtility(pk) - if exists != exists2 { - build.Critical("got a contract without utility or vice versa which shouldn't happen", - exists, exists2) - } - if !exists || !exists2 { - // File contract does not seem to be part of the host anymore. - // Delete this contract and mark the file to be saved. - delete(f.contracts, fcid) - saveFile = true - continue - } - if !contractUtility.GoodForRenew { - // We are no longer renewing with this contract, so it does not - // count for redundancy. - continue - } - hpk := recentContract.HostPublicKey + // Build a map of host public keys. 
+ pks := make(map[string]types.SiaPublicKey) + for _, pk := range f.HostPublicKeys() { + pks[string(pk.Key)] = pk + } + + // Iterate through the pieces of the file and mark which hosts are already + // in use for the chunk. As you delete hosts from the 'unusedHosts' map, + // also increment the 'piecesCompleted' value. + for i := uint64(0); i < f.NumChunks(); i++ { + for j := uint64(0); j < f.NumPieces(); j++ { + // Get the piece. + piece, err := f.Piece(i, j) + if err != nil { + r.log.Println("failed to get piece for building incomplete chunks") + return nil + } + + // Get the contract for the piece. + pk, exists := pks[string(piece.HostPubKey.Key)] + if !exists { + build.Critical("Couldn't find public key in map. This should never happen") + } + contractUtility, exists2 := r.hostContractor.ContractUtility(pk) + if exists != exists2 { + build.Critical("got a contract without utility or vice versa which shouldn't happen", + exists, exists2) + } + if !exists || !exists2 { + // File contract does not seem to be part of the host anymore. + continue + } + if !contractUtility.GoodForRenew { + // We are no longer renewing with this contract, so it does not + // count for redundancy. + continue + } - // Mark the chunk set based on the pieces in this contract. - for _, piece := range fileContract.Pieces { - _, exists := newUnfinishedChunks[piece.Chunk].unusedHosts[hpk.String()] - redundantPiece := newUnfinishedChunks[piece.Chunk].pieceUsage[piece.Piece] + // Mark the chunk set based on the pieces in this contract. 
+ _, exists = newUnfinishedChunks[i].unusedHosts[pk.String()] + redundantPiece := newUnfinishedChunks[i].pieceUsage[j] if exists && !redundantPiece { - newUnfinishedChunks[piece.Chunk].pieceUsage[piece.Piece] = true - newUnfinishedChunks[piece.Chunk].piecesCompleted++ - delete(newUnfinishedChunks[piece.Chunk].unusedHosts, hpk.String()) + newUnfinishedChunks[i].pieceUsage[j] = true + newUnfinishedChunks[i].piecesCompleted++ + delete(newUnfinishedChunks[i].unusedHosts, pk.String()) } else if exists { // This host has a piece, but it is the same piece another host // has. We should still remove the host from the unusedHosts // since one host having multiple pieces of a chunk might lead // to unexpected issues. - delete(newUnfinishedChunks[piece.Chunk].unusedHosts, hpk.String()) + delete(newUnfinishedChunks[i].unusedHosts, pk.String()) } - } - } - // If 'saveFile' is marked, it means we deleted some dead contracts and - // cleaned up the file a bit. Save the file to clean up some space on disk - // and prevent the same work from being repeated after the next restart. - // - // TODO / NOTE: This process isn't going to make sense anymore once we - // switch to chunk-based saving. - if saveFile { - err := r.saveFile(f) - if err != nil { - r.log.Println("error while saving a file after pruning some contracts from it:", err) + } } @@ -235,20 +232,27 @@ func (r *Renter) buildUnfinishedChunks(f *file, hosts map[string]struct{}) []*un // managedBuildChunkHeap will iterate through all of the files in the renter and // construct a chunk heap. func (r *Renter) managedBuildChunkHeap(hosts map[string]struct{}) { + // Save host keys in map. We can't do that under the same lock since we + // need to call a public method on the file. 
+ pks := make(map[string]types.SiaPublicKey) + goodForRenew := make(map[string]bool) + offline := make(map[string]bool) + for _, f := range r.files { + for _, pk := range f.HostPublicKeys() { + pks[string(pk.Key)] = pk + } + } // Loop through the whole set of files and get a list of chunks to add to // the heap. - id := r.mu.RLock() - goodForRenew := make(map[types.FileContractID]bool) - offline := make(map[types.FileContractID]bool) for _, file := range r.files { - file.mu.RLock() - for cid := range file.contracts { - resolvedID := r.hostContractor.ResolveIDToPubKey(cid) - cu, ok := r.hostContractor.ContractUtility(resolvedID) - goodForRenew[cid] = ok && cu.GoodForRenew - offline[cid] = r.hostContractor.IsOffline(resolvedID) + for _, pk := range pks { + cu, ok := r.hostContractor.ContractUtility(pk) + if !ok { + continue + } + goodForRenew[string(pk.Key)] = ok && cu.GoodForRenew + offline[string(pk.Key)] = r.hostContractor.IsOffline(pk) } - file.mu.RUnlock() unfinishedUploadChunks := r.buildUnfinishedChunks(file, hosts) for i := 0; i < len(unfinishedUploadChunks); i++ { @@ -256,19 +260,18 @@ func (r *Renter) managedBuildChunkHeap(hosts map[string]struct{}) { } } for _, file := range r.files { - file.mu.RLock() // check for local file - tf, exists := r.persist.Tracking[file.name] + id := r.mu.RLock() + tf, exists := r.persist.Tracking[file.SiaPath()] + r.mu.RUnlock(id) if exists { // Check if local file is missing and redundancy is less than 1 // log warning to renter log - if _, err := os.Stat(tf.RepairPath); os.IsNotExist(err) && file.redundancy(offline, goodForRenew) < 1 { + if _, err := os.Stat(tf.RepairPath); os.IsNotExist(err) && file.Redundancy(offline, goodForRenew) < 1 { r.log.Println("File not found on disk and possibly unrecoverable:", tf.RepairPath) } } - file.mu.RUnlock() } - r.mu.RUnlock(id) } // managedPrepareNextChunk takes the next chunk from the chunk heap and prepares diff --git a/modules/renter/workerupload.go b/modules/renter/workerupload.go 
index 4868bfe998..e984a76554 100644 --- a/modules/renter/workerupload.go +++ b/modules/renter/workerupload.go @@ -118,27 +118,16 @@ func (w *worker) managedUpload(uc *unfinishedUploadChunk, pieceIndex uint64) { w.uploadConsecutiveFailures = 0 w.mu.Unlock() - // Update the renter metadata. - addr := e.Address() - endHeight := e.EndHeight() - id := w.renter.mu.Lock() - uc.renterFile.mu.Lock() - contract, exists := uc.renterFile.contracts[w.contract.ID] - if !exists { - contract = fileContract{ - ID: w.contract.ID, - IP: addr, - WindowStart: endHeight, - } + // Add piece to renterFile + err = uc.renterFile.AddPiece(w.contract.HostPublicKey, uc.index, pieceIndex, root) + if err != nil { + w.renter.log.Debugln("Worker failed to add new piece to SiaFile:", err) + w.managedUploadFailed(uc, pieceIndex) + return } - contract.Pieces = append(contract.Pieces, pieceData{ - Chunk: uc.index, - Piece: pieceIndex, - MerkleRoot: root, - }) - uc.renterFile.contracts[w.contract.ID] = contract + + id := w.renter.mu.Lock() w.renter.saveFile(uc.renterFile) - uc.renterFile.mu.Unlock() w.renter.mu.Unlock(id) // Upload is complete. 
Update the state of the chunk and the renter's memory From 22c7ed4b0a8e2fe7ae2abcff12da517e8b403c2b Mon Sep 17 00:00:00 2001 From: Christopher Schinnerl Date: Tue, 19 Jun 2018 13:18:13 -0400 Subject: [PATCH 07/15] Implement all methods except fileToSiaFile and siaFileToFile --- modules/renter/download.go | 22 +-- modules/renter/files.go | 199 ++----------------------- modules/renter/files_test.go | 231 +++++++++++++---------------- modules/renter/persist_test.go | 4 +- modules/renter/siafile/metadata.go | 165 +++++++++++++++++++-- modules/renter/siafile/siafile.go | 81 +++++++--- modules/renter/upload.go | 3 +- modules/renter/uploadheap.go | 85 ++++++----- 8 files changed, 386 insertions(+), 404 deletions(-) diff --git a/modules/renter/download.go b/modules/renter/download.go index 7bcac46893..8d0dfba849 100644 --- a/modules/renter/download.go +++ b/modules/renter/download.go @@ -384,16 +384,18 @@ func (r *Renter) managedNewDownload(params downloadParams) (*download, error) { // For each chunk, assemble a mapping from the contract id to the index of // the piece within the chunk that the contract is responsible for. 
chunkMaps := make([]map[string]downloadPieceInfo, maxChunk-minChunk+1) - for i := range chunkMaps { - chunkMaps[i] = make(map[string]downloadPieceInfo) - for j := uint64(0); j < uint64(params.file.NumPieces()); j++ { - piece, err := params.file.Piece(uint64(i), j) - if err != nil { - return nil, err - } - chunkMaps[i][string(piece.HostPubKey.Key)] = downloadPieceInfo{ - index: j, - root: piece.MerkleRoot, + for chunkIndex := range chunkMaps { + chunkMaps[chunkIndex] = make(map[string]downloadPieceInfo) + pieces, err := params.file.Pieces(uint64(chunkIndex)) + if err != nil { + return nil, err + } + for pieceIndex, pieceSet := range pieces { + for _, piece := range pieceSet { + chunkMaps[chunkIndex][string(piece.HostPubKey.Key)] = downloadPieceInfo{ + index: uint64(pieceIndex), + root: piece.MerkleRoot, + } } } diff --git a/modules/renter/files.go b/modules/renter/files.go index de57fdf72b..c71e287901 100644 --- a/modules/renter/files.go +++ b/modules/renter/files.go @@ -1,17 +1,13 @@ package renter import ( - "fmt" - "math" "os" "path/filepath" "sync" - "github.com/NebulousLabs/Sia/build" "github.com/NebulousLabs/Sia/crypto" "github.com/NebulousLabs/Sia/modules" "github.com/NebulousLabs/Sia/modules/renter/siafile" - "github.com/NebulousLabs/Sia/persist" "github.com/NebulousLabs/Sia/types" "github.com/NebulousLabs/errors" @@ -72,183 +68,6 @@ func deriveKey(masterKey crypto.TwofishKey, chunkIndex, pieceIndex uint64) crypt return crypto.TwofishKey(crypto.HashAll(masterKey, chunkIndex, pieceIndex)) } -// staticChunkSize returns the size of one chunk. -func (f *file) staticChunkSize() uint64 { - return f.pieceSize * uint64(f.erasureCode.MinPieces()) -} - -// numChunks returns the number of chunks that f was split into. -func (f *file) numChunks() uint64 { - // empty files still need at least one chunk - if f.size == 0 { - return 1 - } - n := f.size / f.staticChunkSize() - // last chunk will be padded, unless chunkSize divides file evenly. 
- if f.size%f.staticChunkSize() != 0 { - n++ - } - return n -} - -// available indicates whether the file is ready to be downloaded. -func (f *file) available(offline map[types.FileContractID]bool) bool { - chunkPieces := make([]int, f.numChunks()) - for _, fc := range f.contracts { - if offline[fc.ID] { - continue - } - for _, p := range fc.Pieces { - chunkPieces[p.Chunk]++ - } - } - for _, n := range chunkPieces { - if n < f.erasureCode.MinPieces() { - return false - } - } - return true -} - -// uploadedBytes indicates how many bytes of the file have been uploaded via -// current file contracts. Note that this includes padding and redundancy, so -// uploadedBytes can return a value much larger than the file's original filesize. -func (f *file) uploadedBytes() uint64 { - var uploaded uint64 - for _, fc := range f.contracts { - // Note: we need to multiply by SectorSize here instead of - // f.pieceSize because the actual bytes uploaded include overhead - // from TwoFish encryption - uploaded += uint64(len(fc.Pieces)) * modules.SectorSize - } - return uploaded -} - -// uploadProgress indicates what percentage of the file (plus redundancy) has -// been uploaded. Note that a file may be Available long before UploadProgress -// reaches 100%, and UploadProgress may report a value greater than 100%. -func (f *file) uploadProgress() float64 { - uploaded := f.uploadedBytes() - desired := modules.SectorSize * uint64(f.erasureCode.NumPieces()) * f.numChunks() - - return math.Min(100*(float64(uploaded)/float64(desired)), 100) -} - -// redundancy returns the redundancy of the least redundant chunk. A file -// becomes available when this redundancy is >= 1. Assumes that every piece is -// unique within a file contract. -1 is returned if the file has size 0. It -// takes one argument, a map of offline contracts for this file. 
-func (f *file) redundancy(offlineMap map[types.FileContractID]bool, goodForRenewMap map[types.FileContractID]bool) float64 { - if f.size == 0 { - return -1 - } - piecesPerChunk := make([]int, f.numChunks()) - piecesPerChunkNoRenew := make([]int, f.numChunks()) - // If the file has non-0 size then the number of chunks should also be - // non-0. Therefore the f.size == 0 conditional block above must appear - // before this check. - if len(piecesPerChunk) == 0 { - build.Critical("cannot get redundancy of a file with 0 chunks") - return -1 - } - // pieceRenewMap stores each encountered piece and a boolean to indicate if - // that piece was already encountered on a goodForRenew contract. - pieceRenewMap := make(map[string]bool) - for _, fc := range f.contracts { - offline, exists1 := offlineMap[fc.ID] - goodForRenew, exists2 := goodForRenewMap[fc.ID] - if exists1 != exists2 { - build.Critical("contract can't be in one map but not in the other") - } - if !exists1 { - continue - } - - // do not count pieces from the contract if the contract is offline - if offline { - continue - } - for _, p := range fc.Pieces { - pieceKey := fmt.Sprintf("%v/%v", p.Chunk, p.Piece) - // If the piece is redundant we need to check if the same piece was - // encountered on a goodForRenew contract before. If it wasn't we - // need to increase the piecesPerChunk counter and set the value of - // the pieceKey entry to true. Otherwise we just ignore the piece. - if gfr, redundant := pieceRenewMap[pieceKey]; redundant && gfr { - continue - } else if redundant && !gfr { - pieceRenewMap[pieceKey] = true - piecesPerChunk[p.Chunk]++ - continue - } - pieceRenewMap[pieceKey] = goodForRenew - - // If the contract is goodForRenew, increment the entry in both - // maps. If not, only the one in piecesPerChunkNoRenew. 
- if goodForRenew { - piecesPerChunk[p.Chunk]++ - } - piecesPerChunkNoRenew[p.Chunk]++ - } - } - // Find the chunk with the least finished pieces counting only pieces of - // contracts that are goodForRenew. - minPieces := piecesPerChunk[0] - for _, numPieces := range piecesPerChunk { - if numPieces < minPieces { - minPieces = numPieces - } - } - // Find the chunk with the least finished pieces including pieces from - // contracts that are not good for renewal. - minPiecesNoRenew := piecesPerChunkNoRenew[0] - for _, numPieces := range piecesPerChunkNoRenew { - if numPieces < minPiecesNoRenew { - minPiecesNoRenew = numPieces - } - } - // If the redundancy is smaller than 1x we return the redundancy that - // includes contracts that are not good for renewal. The reason for this is - // a better user experience. If the renter operates correctly, redundancy - // should never go above numPieces / minPieces and redundancyNoRenew should - // never go below 1. - redundancy := float64(minPieces) / float64(f.erasureCode.MinPieces()) - redundancyNoRenew := float64(minPiecesNoRenew) / float64(f.erasureCode.MinPieces()) - if redundancy < 1 { - return redundancyNoRenew - } - return redundancy -} - -// expiration returns the lowest height at which any of the file's contracts -// will expire. -func (f *file) expiration() types.BlockHeight { - if len(f.contracts) == 0 { - return 0 - } - lowest := ^types.BlockHeight(0) - for _, fc := range f.contracts { - if fc.WindowStart < lowest { - lowest = fc.WindowStart - } - } - return lowest -} - -// newFile creates a new file object. 
-func newFile(name string, code modules.ErasureCoder, pieceSize, fileSize uint64) *file { - return &file{ - name: name, - size: fileSize, - contracts: make(map[types.FileContractID]fileContract), - masterKey: crypto.GenerateTwofishKey(), - erasureCode: code, - pieceSize: pieceSize, - - staticUID: persist.RandomSuffix(), - } -} - // DeleteFile removes a file entry from the renter and deletes its data from // the hosts it is stored on. // @@ -290,17 +109,19 @@ func (r *Renter) FileList() []modules.FileInfo { } } - // Build 2 maps that map every contract id to its offline and goodForRenew + // Build 2 maps that map every pubkey to its offline and goodForRenew // status. goodForRenew := make(map[string]bool) offline := make(map[string]bool) + contracts := make(map[string]modules.RenterContract) for _, pk := range pks { - cu, ok := r.hostContractor.ContractUtility(pk) + contract, ok := r.hostContractor.ContractByPublicKey(pk) if !ok { continue } - goodForRenew[string(pk.Key)] = ok && cu.GoodForRenew + goodForRenew[string(pk.Key)] = ok && contract.Utility.GoodForRenew offline[string(pk.Key)] = r.hostContractor.IsOffline(pk) + contracts[string(pk.Key)] = contract } // Build the list of FileInfos. @@ -324,7 +145,7 @@ func (r *Renter) FileList() []modules.FileInfo { Redundancy: f.Redundancy(offline, goodForRenew), UploadedBytes: f.UploadedBytes(), UploadProgress: f.UploadProgress(), - Expiration: f.Expiration(), + Expiration: f.Expiration(contracts), }) } return fileList @@ -348,13 +169,15 @@ func (r *Renter) File(siaPath string) (modules.FileInfo, error) { // status. 
goodForRenew := make(map[string]bool) offline := make(map[string]bool) + contracts := make(map[string]modules.RenterContract) for _, pk := range pks { - cu, ok := r.hostContractor.ContractUtility(pk) + contract, ok := r.hostContractor.ContractByPublicKey(pk) if !ok { continue } - goodForRenew[string(pk.Key)] = ok && cu.GoodForRenew + goodForRenew[string(pk.Key)] = ok && contract.Utility.GoodForRenew offline[string(pk.Key)] = r.hostContractor.IsOffline(pk) + contracts[string(pk.Key)] = contract } // Build the FileInfo @@ -373,7 +196,7 @@ func (r *Renter) File(siaPath string) (modules.FileInfo, error) { Redundancy: file.Redundancy(offline, goodForRenew), UploadedBytes: file.UploadedBytes(), UploadProgress: file.UploadProgress(), - Expiration: file.Expiration(), + Expiration: file.Expiration(contracts), } return fileInfo, nil diff --git a/modules/renter/files_test.go b/modules/renter/files_test.go index cca3c506df..bb1adb19a5 100644 --- a/modules/renter/files_test.go +++ b/modules/renter/files_test.go @@ -5,8 +5,11 @@ import ( "path/filepath" "testing" + "github.com/NebulousLabs/Sia/crypto" "github.com/NebulousLabs/Sia/modules" + "github.com/NebulousLabs/Sia/modules/renter/siafile" "github.com/NebulousLabs/Sia/types" + "github.com/NebulousLabs/errors" ) // TestFileNumChunks checks the numChunks method of the file type. @@ -29,40 +32,34 @@ func TestFileNumChunks(t *testing.T) { for _, test := range tests { rsc, _ := NewRSCode(test.piecesPerChunk, 1) // can't use 0 - f := &file{size: test.size, erasureCode: rsc, pieceSize: test.pieceSize} - if f.numChunks() != test.expNumChunks { - t.Errorf("Test %v: expected %v, got %v", test, test.expNumChunks, f.numChunks()) + f := siafile.New(t.Name(), rsc, test.pieceSize, test.size) + if f.NumChunks() != test.expNumChunks { + t.Errorf("Test %v: expected %v, got %v", test, test.expNumChunks, f.NumChunks()) } } } // TestFileAvailable probes the available method of the file type. 
func TestFileAvailable(t *testing.T) { - rsc, _ := NewRSCode(1, 10) - f := &file{ - size: 1000, - erasureCode: rsc, - pieceSize: 100, - } - neverOffline := make(map[types.FileContractID]bool) + rsc, _ := NewRSCode(1, 1) // can't use 0 + f := siafile.New(t.Name(), rsc, pieceSize, 100) + neverOffline := make(map[string]bool) - if f.available(neverOffline) { + if f.Available(neverOffline) { t.Error("file should not be available") } - var fc fileContract - for i := uint64(0); i < f.numChunks(); i++ { - fc.Pieces = append(fc.Pieces, pieceData{Chunk: i, Piece: 0}) + for i := uint64(0); i < f.NumChunks(); i++ { + f.AddPiece(types.SiaPublicKey{}, i, 0, crypto.Hash{}) } - f.contracts = map[types.FileContractID]fileContract{{}: fc} - if !f.available(neverOffline) { + if !f.Available(neverOffline) { t.Error("file should be available") } - specificOffline := make(map[types.FileContractID]bool) - specificOffline[fc.ID] = true - if f.available(specificOffline) { + specificOffline := make(map[string]bool) + specificOffline[string(types.SiaPublicKey{}.Key)] = true + if f.Available(specificOffline) { t.Error("file should not be available") } } @@ -70,35 +67,34 @@ func TestFileAvailable(t *testing.T) { // TestFileUploadedBytes tests that uploadedBytes() returns a value equal to // the number of sectors stored via contract times the size of each sector. 
func TestFileUploadedBytes(t *testing.T) { - f := &file{} // ensure that a piece fits within a sector - f.pieceSize = modules.SectorSize / 2 - f.contracts = make(map[types.FileContractID]fileContract) - f.contracts[types.FileContractID{}] = fileContract{ - ID: types.FileContractID{}, - IP: modules.NetAddress(""), - Pieces: make([]pieceData, 4), + rsc, _ := NewRSCode(1, 3) + f := siafile.New(t.Name(), rsc, modules.SectorSize/2, 1000) + for i := uint64(0); i < 4; i++ { + err := f.AddPiece(types.SiaPublicKey{}, uint64(0), i, crypto.Hash{}) + if err != nil { + t.Fatal(err) + } } - if f.uploadedBytes() != 4*modules.SectorSize { - t.Errorf("expected uploadedBytes to be 8, got %v", f.uploadedBytes()) + if f.UploadedBytes() != 4*modules.SectorSize { + t.Errorf("expected uploadedBytes to be 8, got %v", f.UploadedBytes()) } } // TestFileUploadProgressPinning verifies that uploadProgress() returns at most // 100%, even if more pieces have been uploaded, func TestFileUploadProgressPinning(t *testing.T) { - f := &file{} - f.pieceSize = 2 - f.contracts = make(map[types.FileContractID]fileContract) - f.contracts[types.FileContractID{}] = fileContract{ - ID: types.FileContractID{}, - IP: modules.NetAddress(""), - Pieces: make([]pieceData, 4), - } rsc, _ := NewRSCode(1, 1) - f.erasureCode = rsc - if f.uploadProgress() != 100 { - t.Fatal("expected uploadProgress to report 100%") + f := siafile.New(t.Name(), rsc, 2, 4) + for i := uint64(0); i < 2; i++ { + err1 := f.AddPiece(types.SiaPublicKey{Key: []byte{byte(0)}}, uint64(0), i, crypto.Hash{}) + err2 := f.AddPiece(types.SiaPublicKey{Key: []byte{byte(1)}}, uint64(0), i, crypto.Hash{}) + if err := errors.Compose(err1, err2); err != nil { + t.Fatal(err) + } + } + if f.UploadProgress() != 100 { + t.Fatal("expected uploadProgress to report 100% but was", f.UploadProgress()) } } @@ -106,113 +102,88 @@ func TestFileUploadProgressPinning(t *testing.T) { // with varying number of filecontracts and erasure code settings. 
func TestFileRedundancy(t *testing.T) { nDatas := []int{1, 2, 10} - neverOffline := make(map[types.FileContractID]bool) - goodForRenew := make(map[types.FileContractID]bool) - for i := 0; i < 5; i++ { - neverOffline[types.FileContractID{byte(i)}] = false - goodForRenew[types.FileContractID{byte(i)}] = true + neverOffline := make(map[string]bool) + goodForRenew := make(map[string]bool) + for i := 0; i < 6; i++ { + neverOffline[string([]byte{byte(i)})] = false + goodForRenew[string([]byte{byte(i)})] = true } for _, nData := range nDatas { rsc, _ := NewRSCode(nData, 10) - f := &file{ - size: 1000, - pieceSize: 100, - contracts: make(map[types.FileContractID]fileContract), - erasureCode: rsc, - } + f := siafile.New(t.Name(), rsc, 100, 1000) // Test that an empty file has 0 redundancy. - if r := f.redundancy(neverOffline, goodForRenew); r != 0 { + if r := f.Redundancy(neverOffline, goodForRenew); r != 0 { t.Error("expected 0 redundancy, got", r) } - // Test that a file with 1 filecontract that has a piece for every chunk but + // Test that a file with 1 host that has a piece for every chunk but // one chunk still has a redundancy of 0. - fc := fileContract{ - ID: types.FileContractID{0}, - } - for i := uint64(0); i < f.numChunks()-1; i++ { - pd := pieceData{ - Chunk: i, - Piece: 0, + for i := uint64(0); i < f.NumChunks()-1; i++ { + err := f.AddPiece(types.SiaPublicKey{Key: []byte{byte(0)}}, i, 0, crypto.Hash{}) + if err != nil { + t.Fatal(err) } - fc.Pieces = append(fc.Pieces, pd) } - f.contracts[fc.ID] = fc - if r := f.redundancy(neverOffline, goodForRenew); r != 0 { + if r := f.Redundancy(neverOffline, goodForRenew); r != 0 { t.Error("expected 0 redundancy, got", r) } - // Test that adding another filecontract with a piece for every chunk but one + // Test that adding another host with a piece for every chunk but one // chunk still results in a file with redundancy 0. 
- fc = fileContract{ - ID: types.FileContractID{1}, - } - for i := uint64(0); i < f.numChunks()-1; i++ { - pd := pieceData{ - Chunk: i, - Piece: 1, + for i := uint64(0); i < f.NumChunks()-1; i++ { + err := f.AddPiece(types.SiaPublicKey{Key: []byte{byte(1)}}, i, 1, crypto.Hash{}) + if err != nil { + t.Fatal(err) } - fc.Pieces = append(fc.Pieces, pd) } - f.contracts[fc.ID] = fc - if r := f.redundancy(neverOffline, goodForRenew); r != 0 { + if r := f.Redundancy(neverOffline, goodForRenew); r != 0 { t.Error("expected 0 redundancy, got", r) } // Test that adding a file contract with a piece for the missing chunk // results in a file with redundancy > 0 && <= 1. - fc = fileContract{ - ID: types.FileContractID{2}, + err := f.AddPiece(types.SiaPublicKey{Key: []byte{byte(2)}}, f.NumChunks()-1, 0, crypto.Hash{}) + if err != nil { + t.Fatal(err) } - pd := pieceData{ - Chunk: f.numChunks() - 1, - Piece: 0, - } - fc.Pieces = append(fc.Pieces, pd) - f.contracts[fc.ID] = fc // 1.0 / MinPieces because the chunk with the least number of pieces has 1 piece. - expectedR := 1.0 / float64(f.erasureCode.MinPieces()) - if r := f.redundancy(neverOffline, goodForRenew); r != expectedR { + expectedR := 1.0 / float64(f.ErasureCode().MinPieces()) + if r := f.Redundancy(neverOffline, goodForRenew); r != expectedR { t.Errorf("expected %f redundancy, got %f", expectedR, r) } // Test that adding a file contract that has erasureCode.MinPieces() pieces // per chunk for all chunks results in a file with redundancy > 1. - fc = fileContract{ - ID: types.FileContractID{3}, - } - for iChunk := uint64(0); iChunk < f.numChunks(); iChunk++ { - for iPiece := uint64(0); iPiece < uint64(f.erasureCode.MinPieces()); iPiece++ { - fc.Pieces = append(fc.Pieces, pieceData{ - Chunk: iChunk, - // add 1 since the same piece can't count towards redundancy twice. 
- Piece: iPiece + 1, - }) + for iChunk := uint64(0); iChunk < f.NumChunks(); iChunk++ { + for iPiece := uint64(1); iPiece < uint64(f.ErasureCode().MinPieces()); iPiece++ { + err := f.AddPiece(types.SiaPublicKey{Key: []byte{byte(3)}}, iChunk, iPiece, crypto.Hash{}) + if err != nil { + t.Fatal(err) + } + } + err := f.AddPiece(types.SiaPublicKey{Key: []byte{byte(4)}}, iChunk, uint64(f.ErasureCode().MinPieces()), crypto.Hash{}) + if err != nil { + t.Fatal(err) } } - f.contracts[fc.ID] = fc // 1+MinPieces / MinPieces because the chunk with the least number of pieces has 1+MinPieces pieces. - expectedR = float64(1+f.erasureCode.MinPieces()) / float64(f.erasureCode.MinPieces()) - if r := f.redundancy(neverOffline, goodForRenew); r != expectedR { + expectedR = float64(1+f.ErasureCode().MinPieces()) / float64(f.ErasureCode().MinPieces()) + if r := f.Redundancy(neverOffline, goodForRenew); r != expectedR { t.Errorf("expected %f redundancy, got %f", expectedR, r) } // verify offline file contracts are not counted in the redundancy - fc = fileContract{ - ID: types.FileContractID{4}, - } - for iChunk := uint64(0); iChunk < f.numChunks(); iChunk++ { - for iPiece := uint64(0); iPiece < uint64(f.erasureCode.MinPieces()); iPiece++ { - fc.Pieces = append(fc.Pieces, pieceData{ - Chunk: iChunk, - Piece: iPiece, - }) + for iChunk := uint64(0); iChunk < f.NumChunks(); iChunk++ { + for iPiece := uint64(0); iPiece < uint64(f.ErasureCode().MinPieces()); iPiece++ { + err := f.AddPiece(types.SiaPublicKey{Key: []byte{byte(5)}}, iChunk, iPiece, crypto.Hash{}) + if err != nil { + t.Fatal(err) + } } } - f.contracts[fc.ID] = fc - specificOffline := make(map[types.FileContractID]bool) - for fcid := range goodForRenew { - specificOffline[fcid] = false + specificOffline := make(map[string]bool) + for pk := range goodForRenew { + specificOffline[pk] = false } - specificOffline[fc.ID] = true - if r := f.redundancy(specificOffline, goodForRenew); r != expectedR { + specificOffline[string(byte(5))] = 
true + if r := f.Redundancy(specificOffline, goodForRenew); r != expectedR { t.Errorf("expected redundancy to ignore offline file contracts, wanted %f got %f", expectedR, r) } } @@ -220,33 +191,43 @@ func TestFileRedundancy(t *testing.T) { // TestFileExpiration probes the expiration method of the file type. func TestFileExpiration(t *testing.T) { - f := &file{ - contracts: make(map[types.FileContractID]fileContract), + f := newTestingFile() + contracts := make(map[string]modules.RenterContract) + if f.Expiration(contracts) != 0 { + t.Error("file with no pieces should report as having no time remaining") } + // Create 3 public keys + pk1 := types.SiaPublicKey{Key: []byte{0}} + pk2 := types.SiaPublicKey{Key: []byte{1}} + pk3 := types.SiaPublicKey{Key: []byte{2}} - if f.expiration() != 0 { - t.Error("file with no pieces should report as having no time remaining") + // Add a piece for each key to the file. + err1 := f.AddPiece(pk1, 0, 0, crypto.Hash{}) + err2 := f.AddPiece(pk2, 0, 1, crypto.Hash{}) + err3 := f.AddPiece(pk3, 0, 2, crypto.Hash{}) + if err := errors.Compose(err1, err2, err3); err != nil { + t.Fatal(err) } // Add a contract. - fc := fileContract{} - fc.WindowStart = 100 - f.contracts[types.FileContractID{0}] = fc - if f.expiration() != 100 { + fc := modules.RenterContract{} + fc.EndHeight = 100 + contracts[string(pk1.Key)] = fc + if f.Expiration(contracts) != 100 { t.Error("file did not report lowest WindowStart") } // Add a contract with a lower WindowStart. - fc.WindowStart = 50 - f.contracts[types.FileContractID{1}] = fc - if f.expiration() != 50 { + fc.EndHeight = 50 + contracts[string(pk2.Key)] = fc + if f.Expiration(contracts) != 50 { t.Error("file did not report lowest WindowStart") } // Add a contract with a higher WindowStart. 
- fc.WindowStart = 75 - f.contracts[types.FileContractID{2}] = fc - if f.expiration() != 50 { + fc.EndHeight = 75 + contracts[string(pk3.Key)] = fc + if f.Expiration(contracts) != 50 { t.Error("file did not report lowest WindowStart") } } diff --git a/modules/renter/persist_test.go b/modules/renter/persist_test.go index 4fd49bb673..9c8b7ea0ee 100644 --- a/modules/renter/persist_test.go +++ b/modules/renter/persist_test.go @@ -8,7 +8,6 @@ import ( "testing" "github.com/NebulousLabs/Sia/build" - "github.com/NebulousLabs/Sia/crypto" "github.com/NebulousLabs/Sia/modules" "github.com/NebulousLabs/Sia/modules/renter/siafile" @@ -23,9 +22,8 @@ func newTestingFile() *siafile.SiaFile { rsc, _ := NewRSCode(nData+1, nParity+1) name := "testfile-" + strconv.Itoa(int(data[0])) - masterKey := crypto.GenerateTwofishKey() - return siafile.New(name, rsc, masterKey) + return siafile.New(name, rsc, pieceSize, 1000) } // equalFiles is a helper function that compares two files for equality. diff --git a/modules/renter/siafile/metadata.go b/modules/renter/siafile/metadata.go index 03a7a444bd..a7ed2f1015 100644 --- a/modules/renter/siafile/metadata.go +++ b/modules/renter/siafile/metadata.go @@ -1,10 +1,13 @@ package siafile import ( + "math" "os" "time" + "github.com/NebulousLabs/Sia/build" "github.com/NebulousLabs/Sia/crypto" + "github.com/NebulousLabs/Sia/modules" "github.com/NebulousLabs/Sia/types" ) @@ -52,7 +55,28 @@ type ( // Available indicates whether the file is ready to be downloaded. func (sf *SiaFile) Available(offline map[string]bool) bool { - panic("not implemented yet") + sf.mu.RLock() + defer sf.mu.RUnlock() + // We need to find at least erasureCode.MinPieces different pieces for each + // chunk for the file to be available. 
+ for _, chunk := range sf.chunks { + piecesForChunk := 0 + for _, pieceSet := range chunk.pieces { + for _, piece := range pieceSet { + if !offline[string(piece.HostPubKey.Key)] { + piecesForChunk++ + break // break out since we only count unique pieces + } + } + if piecesForChunk >= sf.erasureCode.MinPieces() { + break // we already have enough pieces for this chunk. + } + } + if piecesForChunk < sf.erasureCode.MinPieces() { + return false // this chunk isn't available. + } + } + return true } // ChunkSize returns the size of a single chunk of the file. @@ -64,25 +88,50 @@ func (sf *SiaFile) ChunkSize() uint64 { // Delete removes the file from disk and marks it as deleted. Once the file is // deleted, certain methods should return an error. +// TODO: This will actually delete the file from disk once we change the +// persistence structure to use the new file format. func (sf *SiaFile) Delete() error { - panic("not implemented yet") + sf.mu.Lock() + defer sf.mu.Unlock() + sf.deleted = true + return nil } // Deleted indicates if this file has been deleted by the user. func (sf *SiaFile) Deleted() bool { - panic("not implemented yet") + sf.mu.RLock() + defer sf.mu.RUnlock() + return sf.deleted } // Expiration returns the lowest height at which any of the file's contracts // will expire. -func (sf *SiaFile) Expiration() types.BlockHeight { - panic("not implemented yet") +func (sf *SiaFile) Expiration(contracts map[string]modules.RenterContract) types.BlockHeight { + sf.mu.RLock() + defer sf.mu.RUnlock() + if len(sf.pubKeyTable) == 0 { + return 0 + } + + lowest := ^types.BlockHeight(0) + for _, pk := range sf.pubKeyTable { + contract, exists := contracts[string(pk.Key)] + if !exists { + continue + } + if contract.EndHeight < lowest { + lowest = contract.EndHeight + } + } + return lowest } // HostPublicKeys returns all the public keys of hosts the file has ever been // uploaded to. That means some of those hosts might no longer be in use. 
func (sf *SiaFile) HostPublicKeys() []types.SiaPublicKey { - panic("not implemented yet") + sf.mu.RLock() + defer sf.mu.RUnlock() + return sf.pubKeyTable } // MasterKey returns the masterkey used to encrypt the file. @@ -94,7 +143,9 @@ func (sf *SiaFile) MasterKey() crypto.TwofishKey { // Mode returns the FileMode of the SiaFile. func (sf *SiaFile) Mode() os.FileMode { - panic("not implemented yet") + sf.mu.RLock() + defer sf.mu.RUnlock() + return sf.metadata.mode } // PieceSize returns the size of a single piece of the file. @@ -109,12 +160,81 @@ func (sf *SiaFile) PieceSize() uint64 { // unique within a file contract. -1 is returned if the file has size 0. It // takes one argument, a map of offline contracts for this file. func (sf *SiaFile) Redundancy(offlineMap map[string]bool, goodForRenewMap map[string]bool) float64 { - panic("not implemented yet") + sf.mu.RLock() + defer sf.mu.RUnlock() + if sf.metadata.fileSize == 0 { + return -1 + } + + minPiecesRenew := ^uint64(0) + minPiecesNoRenew := ^uint64(0) + for _, chunk := range sf.chunks { + // Loop over chunks and remember how many unique pieces of the chunk + // were goodForRenew and how many were not. + numPiecesRenew := uint64(0) + numPiecesNoRenew := uint64(0) + for _, pieceSet := range chunk.pieces { + // Remember if we encountered a goodForRenew piece or a + // !goodForRenew piece that was at least online. + foundGoodForRenew := false + foundOnline := false + for _, piece := range pieceSet { + offline, exists1 := offlineMap[string(piece.HostPubKey.Key)] + goodForRenew, exists2 := goodForRenewMap[string(piece.HostPubKey.Key)] + if exists1 != exists2 { + build.Critical("contract can't be in one map but not in the other") + } + if !exists1 || offline { + continue + } + // If we found a goodForRenew piece we can stop. + if goodForRenew { + foundGoodForRenew = true + break + } + // Otherwise we continue since there might be other hosts with + // the same piece that are goodForRenew.
We still remember that + // we found an online piece though. + foundOnline = true + } + if foundGoodForRenew { + numPiecesRenew++ + numPiecesNoRenew++ + } else if foundOnline { + numPiecesNoRenew++ + } + } + // Remember the smallest number of goodForRenew pieces encountered. + if numPiecesRenew < minPiecesRenew { + minPiecesRenew = numPiecesRenew + } + // Remember the smallest number of !goodForRenew pieces encountered. + if numPiecesNoRenew < minPiecesNoRenew { + minPiecesNoRenew = numPiecesNoRenew + } + } + + // If the redundancy is smaller than 1x we return the redundancy that + // includes contracts that are not good for renewal. The reason for this is + // a better user experience. If the renter operates correctly, redundancy + // should never go above numPieces / minPieces and redundancyNoRenew should + // never go below 1. + redundancy := float64(minPiecesRenew) / float64(sf.erasureCode.MinPieces()) + redundancyNoRenew := float64(minPiecesNoRenew) / float64(sf.erasureCode.MinPieces()) + if redundancy < 1 { + return redundancyNoRenew + } + return redundancy } // Rename changes the name of the file to a new one. -func (sf *SiaFile) Rename(newName string) string { - panic("not implemented yet") +// TODO: This will actually rename the file on disk once we persist the new +// file format. +func (sf *SiaFile) Rename(newName string) error { + sf.mu.Lock() + defer sf.mu.Unlock() + sf.metadata.siaPath = newName + return nil } // SetMode sets the filemode of the sia file. @@ -126,26 +246,43 @@ func (sf *SiaFile) SetMode(mode os.FileMode) { // SiaPath returns the file's sia path. func (sf *SiaFile) SiaPath() string { - panic("not implemented yet") + sf.mu.RLock() + defer sf.mu.RUnlock() + return sf.metadata.siaPath } // Size returns the file's size. 
func (sf *SiaFile) Size() uint64 { - panic("not implemented yet") + sf.mu.RLock() + defer sf.mu.RUnlock() + return uint64(sf.metadata.fileSize) } // UploadedBytes indicates how many bytes of the file have been uploaded via // current file contracts. Note that this includes padding and redundancy, so // uploadedBytes can return a value much larger than the file's original filesize. func (sf *SiaFile) UploadedBytes() uint64 { - panic("not implemented yet") + sf.mu.RLock() + defer sf.mu.RUnlock() + var uploaded uint64 + for _, chunk := range sf.chunks { + for _, pieceSet := range chunk.pieces { + // Note: we need to multiply by SectorSize here instead of + // f.pieceSize because the actual bytes uploaded include overhead + // from TwoFish encryption + uploaded += uint64(len(pieceSet)) * modules.SectorSize + } + } + return uploaded } // UploadProgress indicates what percentage of the file (plus redundancy) has // been uploaded. Note that a file may be Available long before UploadProgress // reaches 100%, and UploadProgress may report a value greater than 100%. func (sf *SiaFile) UploadProgress() float64 { - panic("not implemented yet") + uploaded := sf.UploadedBytes() + desired := modules.SectorSize * uint64(sf.ErasureCode().NumPieces()) * sf.NumChunks() + return math.Min(100*(float64(uploaded)/float64(desired)), 100) } // ChunkSize returns the size of a single chunk of the file. diff --git a/modules/renter/siafile/siafile.go b/modules/renter/siafile/siafile.go index bf054606cb..0864c0b057 100644 --- a/modules/renter/siafile/siafile.go +++ b/modules/renter/siafile/siafile.go @@ -2,6 +2,9 @@ package siafile import ( "encoding/base32" + "encoding/binary" + "fmt" + "reflect" "sync" "github.com/NebulousLabs/Sia/modules" @@ -32,6 +35,7 @@ type ( chunks []Chunk // utility fields. These are not persisted. 
+ deleted bool erasureCode modules.ErasureCoder mu sync.RWMutex uid string @@ -58,9 +62,7 @@ type ( extensionInfo [16]byte // pieces are the pieces of the file the chunk consists of. - // The number of pieces should equal the number of - // dataPieces + parityPieces - pieces []Piece + pieces [][]Piece } // Piece represents a single piece of a chunk on disk @@ -72,23 +74,61 @@ type ( ) // New create a new SiaFile. -func New(siaPath string, erasureCode modules.ErasureCoder, masterKey crypto.TwofishKey) *SiaFile { +func New(siaPath string, erasureCode modules.ErasureCoder, pieceSize, fileSize uint64) *SiaFile { file := &SiaFile{ metadata: Metadata{ - masterKey: masterKey, - pieceSize: modules.SectorSize - crypto.TwofishOverhead, + fileSize: int64(fileSize), + masterKey: crypto.GenerateTwofishKey(), + pieceSize: pieceSize, siaPath: siaPath, }, erasureCode: erasureCode, uid: base32.StdEncoding.EncodeToString(fastrand.Bytes(20))[:20], } + chunks := make([]Chunk, file.NumChunks()) + for i := range chunks { + chunks[i].erasureCodeType = [4]byte{0, 0, 0, 1} + binary.LittleEndian.PutUint32(chunks[i].erasureCodeParams[0:4], uint32(erasureCode.MinPieces())) + binary.LittleEndian.PutUint32(chunks[i].erasureCodeParams[4:8], uint32(erasureCode.NumPieces()-erasureCode.MinPieces())) + chunks[i].pieces = make([][]Piece, erasureCode.NumPieces()) + } + file.chunks = chunks return file } // AddPiece adds an uploaded piece to the file. It also updates the host table // if the public key of the host is not aleady known. func (sf *SiaFile) AddPiece(pk types.SiaPublicKey, chunkIndex, pieceIndex uint64, merkleRoot crypto.Hash) error { - panic("Not implemented yet") + sf.mu.Lock() + defer sf.mu.Unlock() + + // Get the index of the host in the public key table. + tableIndex := -1 + for i, hpk := range sf.pubKeyTable { + if reflect.DeepEqual(hpk, pk) { + tableIndex = i + break + } + } + // If we don't know the host yet, we add it to the table. 
+ if tableIndex == -1 { + sf.pubKeyTable = append(sf.pubKeyTable, pk) + tableIndex = len(sf.pubKeyTable) - 1 + } + // Check if the chunkIndex is valid. + if chunkIndex >= uint64(len(sf.chunks)) { + return fmt.Errorf("chunkIndex %v out of bounds (%v)", chunkIndex, len(sf.chunks)) + } + // Check if the pieceIndex is valid. + if pieceIndex >= uint64(len(sf.chunks[chunkIndex].pieces)) { + return fmt.Errorf("pieceIndex %v out of bounds (%v)", pieceIndex, len(sf.chunks[chunkIndex].pieces)) + } + // Add the piece to the chunk. + sf.chunks[chunkIndex].pieces[pieceIndex] = append(sf.chunks[chunkIndex].pieces[pieceIndex], Piece{ + HostPubKey: pk, + MerkleRoot: merkleRoot, + }) + return nil } // ErasureCode returns the erasure coder used by the file. @@ -98,7 +138,9 @@ func (sf *SiaFile) ErasureCode() modules.ErasureCoder { return sf.erasureCode } -// NumChunks returns the number of chunks the file consists of. +// NumChunks returns the number of chunks the file consists of. This will +// return the number of chunks the file consists of even if the file is not +// fully uploaded yet. func (sf *SiaFile) NumChunks() uint64 { // empty files still need at least one chunk if sf.metadata.fileSize == 0 { @@ -112,22 +154,21 @@ func (sf *SiaFile) NumChunks() uint64 { return n } -// NumPieces returns the number of pieces each chunk in the file consists of. -func (sf *SiaFile) NumPieces() uint64 { +// Pieces returns all the pieces for a chunk in a slice of slices that contains +// all the pieces for a certain index. +func (sf *SiaFile) Pieces(chunkIndex uint64) ([][]Piece, error) { sf.mu.RLock() defer sf.mu.RUnlock() - return uint64(sf.erasureCode.NumPieces()) -} - -// Piece returns the piece the index pieceIndex from within the chunk at the -// index chunkIndex. -func (sf *SiaFile) Piece(chunkIndex, pieceIndex uint64) (Piece, error) { - // TODO should return a deep copy to make sure that the caller can't modify - // the chunks without holding a lock. 
- panic("Not implemented yet") + if chunkIndex >= uint64(len(sf.chunks)) { + return nil, fmt.Errorf("index %v out of bounds (%v)", + chunkIndex, len(sf.chunks)) + } + return sf.chunks[chunkIndex].pieces, nil } // UID returns a unique identifier for this file. func (sf *SiaFile) UID() string { - panic("Not implemented yet") + sf.mu.RLock() + defer sf.mu.RUnlock() + return sf.uid } diff --git a/modules/renter/upload.go b/modules/renter/upload.go index 62b8fc1f24..576c28e238 100644 --- a/modules/renter/upload.go +++ b/modules/renter/upload.go @@ -20,6 +20,7 @@ import ( "github.com/NebulousLabs/Sia/build" "github.com/NebulousLabs/Sia/modules" + "github.com/NebulousLabs/Sia/modules/renter/siafile" ) var ( @@ -81,7 +82,7 @@ func (r *Renter) Upload(up modules.FileUploadParams) error { } // Create file object. - f := fileToSiaFile(newFile(up.SiaPath, up.ErasureCode, pieceSize, uint64(fileInfo.Size()))) + f := siafile.New(up.SiaPath, up.ErasureCode, pieceSize, uint64(fileInfo.Size())) f.SetMode(fileInfo.Mode()) // Add file to renter. diff --git a/modules/renter/uploadheap.go b/modules/renter/uploadheap.go index 1c1f29b8e0..5e8436893a 100644 --- a/modules/renter/uploadheap.go +++ b/modules/renter/uploadheap.go @@ -169,50 +169,49 @@ func (r *Renter) buildUnfinishedChunks(f *siafile.SiaFile, hosts map[string]stru // Iterate through the pieces of the file and mark which hosts are already // in use for the chunk. As you delete hosts from the 'unusedHosts' map, // also increment the 'piecesCompleted' value. - for i := uint64(0); i < f.NumChunks(); i++ { - for j := uint64(0); j < f.NumPieces(); j++ { - // Get the piece. - piece, err := f.Piece(i, j) - if err != nil { - r.log.Println("failed to get piece for building incomplete chunks") - return nil - } - - // Get the contract for the piece. - pk, exists := pks[string(piece.HostPubKey.Key)] - if !exists { - build.Critical("Couldn't find public key in map. 
This should never happen") - } - contractUtility, exists2 := r.hostContractor.ContractUtility(pk) - if exists != exists2 { - build.Critical("got a contract without utility or vice versa which shouldn't happen", - exists, exists2) - } - if !exists || !exists2 { - // File contract does not seem to be part of the host anymore. - continue - } - if !contractUtility.GoodForRenew { - // We are no longer renewing with this contract, so it does not - // count for redundancy. - continue - } - - // Mark the chunk set based on the pieces in this contract. - _, exists = newUnfinishedChunks[i].unusedHosts[pk.String()] - redundantPiece := newUnfinishedChunks[i].pieceUsage[j] - if exists && !redundantPiece { - newUnfinishedChunks[i].pieceUsage[j] = true - newUnfinishedChunks[i].piecesCompleted++ - delete(newUnfinishedChunks[i].unusedHosts, pk.String()) - } else if exists { - // This host has a piece, but it is the same piece another host - // has. We should still remove the host from the unusedHosts - // since one host having multiple pieces of a chunk might lead - // to unexpected issues. - delete(newUnfinishedChunks[i].unusedHosts, pk.String()) + for chunkIndex := uint64(0); chunkIndex < f.NumChunks(); chunkIndex++ { + pieces, err := f.Pieces(chunkIndex) + if err != nil { + r.log.Println("failed to get pieces for building incomplete chunks") + return nil + } + for pieceIndex, pieceSet := range pieces { + for _, piece := range pieceSet { + // Get the contract for the piece. + pk, exists := pks[string(piece.HostPubKey.Key)] + if !exists { + build.Critical("Couldn't find public key in map. This should never happen") + } + contractUtility, exists2 := r.hostContractor.ContractUtility(pk) + if exists != exists2 { + build.Critical("got a contract without utility or vice versa which shouldn't happen", + exists, exists2) + } + if !exists || !exists2 { + // File contract does not seem to be part of the host anymore. 
+ continue + } + if !contractUtility.GoodForRenew { + // We are no longer renewing with this contract, so it does not + // count for redundancy. + continue + } + + // Mark the chunk set based on the pieces in this contract. + _, exists = newUnfinishedChunks[chunkIndex].unusedHosts[pk.String()] + redundantPiece := newUnfinishedChunks[chunkIndex].pieceUsage[pieceIndex] + if exists && !redundantPiece { + newUnfinishedChunks[chunkIndex].pieceUsage[pieceIndex] = true + newUnfinishedChunks[chunkIndex].piecesCompleted++ + delete(newUnfinishedChunks[chunkIndex].unusedHosts, pk.String()) + } else if exists { + // This host has a piece, but it is the same piece another host + // has. We should still remove the host from the unusedHosts + // since one host having multiple pieces of a chunk might lead + // to unexpected issues. + delete(newUnfinishedChunks[chunkIndex].unusedHosts, pk.String()) + } } - } } From bc88503717d4598b1fcd7eb4670e5efff6dd552d Mon Sep 17 00:00:00 2001 From: Christopher Schinnerl Date: Wed, 20 Jun 2018 11:15:25 -0400 Subject: [PATCH 08/15] Implement conversion methods --- modules/renter/download.go | 15 +++- modules/renter/files.go | 106 +++++++++++++++++++++-- modules/renter/files_test.go | 7 +- modules/renter/persist.go | 131 +++++++++++++++++++++++++++-- modules/renter/siafile/compat.go | 102 ++++++++++++++++++++++ modules/renter/siafile/metadata.go | 5 +- modules/renter/siafile/siafile.go | 8 +- 7 files changed, 350 insertions(+), 24 deletions(-) create mode 100644 modules/renter/siafile/compat.go diff --git a/modules/renter/download.go b/modules/renter/download.go index 8d0dfba849..45a85deed0 100644 --- a/modules/renter/download.go +++ b/modules/renter/download.go @@ -384,21 +384,28 @@ func (r *Renter) managedNewDownload(params downloadParams) (*download, error) { // For each chunk, assemble a mapping from the contract id to the index of // the piece within the chunk that the contract is responsible for. 
chunkMaps := make([]map[string]downloadPieceInfo, maxChunk-minChunk+1) - for chunkIndex := range chunkMaps { - chunkMaps[chunkIndex] = make(map[string]downloadPieceInfo) + for chunkIndex := minChunk; chunkIndex <= maxChunk; chunkIndex++ { + // Create the map. + chunkMaps[chunkIndex-minChunk] = make(map[string]downloadPieceInfo) + // Get the pieces for the chunk. pieces, err := params.file.Pieces(uint64(chunkIndex)) if err != nil { return nil, err } for pieceIndex, pieceSet := range pieces { for _, piece := range pieceSet { - chunkMaps[chunkIndex][string(piece.HostPubKey.Key)] = downloadPieceInfo{ + // Sanity check - the same worker should not have two pieces for + // the same chunk. + _, exists := chunkMaps[chunkIndex-minChunk][string(piece.HostPubKey.Key)] + if exists { + r.log.Println("ERROR: Worker has multiple pieces uploaded for the same chunk.") + } + chunkMaps[chunkIndex-minChunk][string(piece.HostPubKey.Key)] = downloadPieceInfo{ index: uint64(pieceIndex), root: piece.MerkleRoot, } } } - } // Queue the downloads for each chunk. diff --git a/modules/renter/files.go b/modules/renter/files.go index c71e287901..e211bd4336 100644 --- a/modules/renter/files.go +++ b/modules/renter/files.go @@ -5,9 +5,11 @@ import ( "path/filepath" "sync" + "github.com/NebulousLabs/Sia/build" "github.com/NebulousLabs/Sia/crypto" "github.com/NebulousLabs/Sia/modules" "github.com/NebulousLabs/Sia/modules/renter/siafile" + "github.com/NebulousLabs/Sia/persist" "github.com/NebulousLabs/Sia/types" "github.com/NebulousLabs/errors" @@ -83,11 +85,20 @@ func (r *Renter) DeleteFile(nickname string) error { delete(r.files, nickname) delete(r.persist.Tracking, nickname) + err := persist.RemoveFile(filepath.Join(r.persistDir, f.SiaPath()+ShareExtension)) + if err != nil { + r.log.Println("WARN: couldn't remove file :", err) + } + r.saveSync() r.mu.Unlock(lockID) + // mark the file as deleted + f.Delete() + // TODO: delete the sectors of the file as well. 
- return errors.AddContext(f.Delete(), "failed to delete file") + + return nil } // FileList returns all of the files that the renter has. @@ -250,12 +261,97 @@ func (r *Renter) RenameFile(currentName, newName string) error { // fileToSiaFile converts a legacy file to a SiaFile. Fields that can't be // populated using the legacy file remain blank. -func fileToSiaFile(f *file) *siafile.SiaFile { - panic("not implemented yet") +func (r *Renter) fileToSiaFile(f *file) *siafile.SiaFile { + fileData := siafile.FileData{ + Name: f.name, + FileSize: f.size, + MasterKey: f.masterKey, + ErasureCode: f.erasureCode, + PieceSize: f.pieceSize, + Mode: os.FileMode(f.mode), + Deleted: f.deleted, + UID: f.staticUID, + } + chunks := make([]siafile.FileChunk, f.numChunks()) + for i := 0; i < len(chunks); i++ { + chunks[i].Pieces = make([][]siafile.Piece, f.erasureCode.NumPieces()) + } + for _, contract := range f.contracts { + pk := r.hostContractor.ResolveIDToPubKey(contract.ID) + for _, piece := range contract.Pieces { + chunks[piece.Chunk].Pieces[piece.Piece] = append(chunks[piece.Chunk].Pieces[piece.Piece], siafile.Piece{ + HostPubKey: pk, + MerkleRoot: piece.MerkleRoot, + }) + } + } + fileData.Chunks = chunks + return siafile.NewFromFileData(fileData) } // siaFileToFile converts a SiaFile to a legacy file. Fields that don't exist // in the legacy file will get lost and therefore not persisted. 
-func siaFileToFile(sf *siafile.SiaFile) *file { - panic("not implemented yet") +func (r *Renter) siaFileToFile(sf *siafile.SiaFile) *file { + fileData := sf.ExportFileData() + f := &file{ + contracts: make(map[types.FileContractID]fileContract), + name: fileData.Name, + size: fileData.FileSize, + masterKey: fileData.MasterKey, + erasureCode: fileData.ErasureCode, + pieceSize: fileData.PieceSize, + mode: uint32(fileData.Mode), + deleted: fileData.Deleted, + staticUID: fileData.UID, + } + for chunkIndex, chunk := range fileData.Chunks { + for pieceIndex, pieceSet := range chunk.Pieces { + for _, piece := range pieceSet { + c, ok := r.hostContractor.ContractByPublicKey(piece.HostPubKey) + if !ok { + build.Critical("missing contract when converting SiaFile to file") + continue + } + h, ok := r.hostDB.Host(piece.HostPubKey) + if !ok { + build.Critical("missing host when converting SiaFile to file") + continue + } + if _, exists := f.contracts[c.ID]; !exists { + f.contracts[c.ID] = fileContract{ + ID: c.ID, + IP: h.NetAddress, + WindowStart: c.EndHeight, + } + } + fc := f.contracts[c.ID] + fc.Pieces = append(fc.Pieces, pieceData{ + Chunk: uint64(chunkIndex), + Piece: uint64(pieceIndex), + MerkleRoot: piece.MerkleRoot, + }) + f.contracts[c.ID] = fc + } + } + } + return f +} + +// numChunks returns the number of chunks that f was split into. +func (f *file) numChunks() uint64 { + // empty files still need at least one chunk + if f.size == 0 { + return 1 + } + n := f.size / f.staticChunkSize() + // last chunk will be padded, unless chunkSize divides file evenly. + if f.size%f.staticChunkSize() != 0 { + n++ + } + return n +} + +// staticChunkSize returns the size of one chunk. 
+func (f *file) staticChunkSize() uint64 { + return f.pieceSize * uint64(f.erasureCode.MinPieces()) } diff --git a/modules/renter/files_test.go b/modules/renter/files_test.go index bb1adb19a5..3d762c3e05 100644 --- a/modules/renter/files_test.go +++ b/modules/renter/files_test.go @@ -245,6 +245,7 @@ func TestRenterFileListLocalPath(t *testing.T) { defer rt.Close() id := rt.renter.mu.Lock() f := newTestingFile() + rt.renter.files[f.SiaPath()] = f rt.renter.persist.Tracking[f.SiaPath()] = trackedFile{ RepairPath: "TestPath", } @@ -277,7 +278,7 @@ func TestRenterDeleteFile(t *testing.T) { // Put a file in the renter. file1 := newTestingFile() - rt.renter.files["1"] = file1 + rt.renter.files[file1.SiaPath()] = file1 // Delete a different file. err = rt.renter.DeleteFile("one") if err != ErrUnknownPath { @@ -342,11 +343,11 @@ func TestRenterFileList(t *testing.T) { // Put a file in the renter. file1 := newTestingFile() - rt.renter.files["1"] = file1 + rt.renter.files[file1.SiaPath()] = file1 if len(rt.renter.FileList()) != 1 { t.Error("FileList is not returning the only file in the renter") } - if rt.renter.FileList()[0].SiaPath != "one" { + if rt.renter.FileList()[0].SiaPath != file1.SiaPath() { t.Error("FileList is not returning the correct filename for the only file") } diff --git a/modules/renter/persist.go b/modules/renter/persist.go index d9682cebac..56761fac16 100644 --- a/modules/renter/persist.go +++ b/modules/renter/persist.go @@ -10,10 +10,12 @@ import ( "path/filepath" "strconv" + "github.com/NebulousLabs/Sia/build" "github.com/NebulousLabs/Sia/encoding" "github.com/NebulousLabs/Sia/modules" "github.com/NebulousLabs/Sia/modules/renter/siafile" "github.com/NebulousLabs/Sia/persist" + "github.com/NebulousLabs/Sia/types" ) const ( @@ -57,6 +59,121 @@ type ( } ) +// MarshalSia implements the encoding.SiaMarshaller interface, writing the +// file data to w. 
+func (f *file) MarshalSia(w io.Writer) error { + enc := encoding.NewEncoder(w) + + // encode easy fields + err := enc.EncodeAll( + f.name, + f.size, + f.masterKey, + f.pieceSize, + f.mode, + ) + if err != nil { + return err + } + // COMPATv0.4.3 - encode the bytesUploaded and chunksUploaded fields + // TODO: the resulting .sia file may confuse old clients. + err = enc.EncodeAll(f.pieceSize*f.numChunks()*uint64(f.erasureCode.NumPieces()), f.numChunks()) + if err != nil { + return err + } + + // encode erasureCode + switch code := f.erasureCode.(type) { + case *rsCode: + err = enc.EncodeAll( + "Reed-Solomon", + uint64(code.dataPieces), + uint64(code.numPieces-code.dataPieces), + ) + if err != nil { + return err + } + default: + if build.DEBUG { + panic("unknown erasure code") + } + return errors.New("unknown erasure code") + } + // encode contracts + if err := enc.Encode(uint64(len(f.contracts))); err != nil { + return err + } + for _, c := range f.contracts { + if err := enc.Encode(c); err != nil { + return err + } + } + return nil +} + +// UnmarshalSia implements the encoding.SiaUnmarshaller interface, +// reconstructing a file from the encoded bytes read from r. +func (f *file) UnmarshalSia(r io.Reader) error { + dec := encoding.NewDecoder(r) + + // COMPATv0.4.3 - decode bytesUploaded and chunksUploaded into dummy vars. + var bytesUploaded, chunksUploaded uint64 + + // Decode easy fields. + err := dec.DecodeAll( + &f.name, + &f.size, + &f.masterKey, + &f.pieceSize, + &f.mode, + &bytesUploaded, + &chunksUploaded, + ) + if err != nil { + return err + } + f.staticUID = persist.RandomSuffix() + + // Decode erasure coder. 
+ var codeType string + if err := dec.Decode(&codeType); err != nil { + return err + } + switch codeType { + case "Reed-Solomon": + var nData, nParity uint64 + err = dec.DecodeAll( + &nData, + &nParity, + ) + if err != nil { + return err + } + rsc, err := NewRSCode(int(nData), int(nParity)) + if err != nil { + return err + } + f.erasureCode = rsc + default: + return errors.New("unrecognized erasure code type: " + codeType) + } + + // Decode contracts. + var nContracts uint64 + if err := dec.Decode(&nContracts); err != nil { + return err + } + f.contracts = make(map[types.FileContractID]fileContract) + var contract fileContract + for i := uint64(0); i < nContracts; i++ { + if err := dec.Decode(&contract); err != nil { + return err + } + f.contracts[contract.ID] = contract + } + return nil +} + // saveFile saves a file to the renter directory. func (r *Renter) saveFile(f *siafile.SiaFile) error { if f.Deleted() { // TODO: violation of locking convention @@ -77,7 +194,7 @@ func (r *Renter) saveFile(f *siafile.SiaFile) error { defer handle.Close() // Write file data. - err = shareFiles([]*siafile.SiaFile{f}, handle) + err = r.shareFiles([]*siafile.SiaFile{f}, handle) if err != nil { return err } @@ -162,11 +279,11 @@ func (r *Renter) loadSettings() error { // shareFiles writes the specified files to w. First a header is written, // followed by the gzipped concatenation of each file. -func shareFiles(siaFiles []*siafile.SiaFile, w io.Writer) error { +func (r *Renter) shareFiles(siaFiles []*siafile.SiaFile, w io.Writer) error { // Convert files to old type. files := make([]*file, 0, len(siaFiles)) for _, sf := range siaFiles { - files = append(files, siaFileToFile(sf)) + files = append(files, r.siaFileToFile(sf)) } // Write header. 
err := encoding.NewEncoder(w).EncodeAll( @@ -219,7 +336,7 @@ func (r *Renter) ShareFiles(nicknames []string, shareDest string) error { files[i] = f } - err = shareFiles(files, handle) + err = r.shareFiles(files, handle) if err != nil { os.Remove(shareDest) return err @@ -244,7 +361,7 @@ func (r *Renter) ShareFilesASCII(nicknames []string) (string, error) { } buf := new(bytes.Buffer) - err := shareFiles(files, base64.NewEncoder(base64.URLEncoding, buf)) + err := r.shareFiles(files, base64.NewEncoder(base64.URLEncoding, buf)) if err != nil { return "", err } @@ -304,12 +421,12 @@ func (r *Renter) loadSharedFiles(reader io.Reader) ([]string, error) { // Add files to renter. names := make([]string, numFiles) for i, f := range files { - r.files[f.name] = fileToSiaFile(f) + r.files[f.name] = r.fileToSiaFile(f) names[i] = f.name } // Save the files. for _, f := range files { - r.saveFile(fileToSiaFile(f)) + r.saveFile(r.fileToSiaFile(f)) } return names, nil diff --git a/modules/renter/siafile/compat.go b/modules/renter/siafile/compat.go new file mode 100644 index 0000000000..5f046ce22a --- /dev/null +++ b/modules/renter/siafile/compat.go @@ -0,0 +1,102 @@ +package siafile + +import ( + "encoding/binary" + "os" + + "github.com/NebulousLabs/Sia/crypto" + "github.com/NebulousLabs/Sia/modules" +) + +type ( + // FileData is a helper struct that contains all the relevant information + // of a file. It simplifies passing the necessary data between modules and + // keeps the interface clean. + FileData struct { + Name string + FileSize uint64 + MasterKey crypto.TwofishKey + ErasureCode modules.ErasureCoder + PieceSize uint64 + Mode os.FileMode + Deleted bool + UID string + Chunks []FileChunk + } + // FileChunk is a helper struct that contains data about a chunk. + FileChunk struct { + Pieces [][]Piece + } +) + +// NewFromFileData creates a new SiaFile from a FileData object that was +// previously created from a legacy file. 
+func NewFromFileData(fd FileData) *SiaFile { + file := &SiaFile{ + metadata: Metadata{ + fileSize: int64(fd.FileSize), + masterKey: fd.MasterKey, + mode: fd.Mode, + pieceSize: fd.PieceSize, + siaPath: fd.Name, + }, + deleted: fd.Deleted, + erasureCode: fd.ErasureCode, + uid: fd.UID, + } + chunks := make([]Chunk, file.NumChunks()) + for i := range chunks { + chunks[i].erasureCodeType = [4]byte{0, 0, 0, 1} + binary.LittleEndian.PutUint32(chunks[i].erasureCodeParams[0:4], uint32(file.erasureCode.MinPieces())) + binary.LittleEndian.PutUint32(chunks[i].erasureCodeParams[4:8], uint32(file.erasureCode.NumPieces()-file.erasureCode.MinPieces())) + chunks[i].pieces = make([][]Piece, file.erasureCode.NumPieces()) + } + file.chunks = chunks + + // Populate the pubKeyTable of the file and add the pieces. + pubKeyMap := make(map[string]int) + for chunkIndex, chunk := range fd.Chunks { + for pieceIndex, pieceSet := range chunk.Pieces { + for _, piece := range pieceSet { + // Check if we already added that public key. + if _, exists := pubKeyMap[string(piece.HostPubKey.Key)]; !exists { + pubKeyMap[string(piece.HostPubKey.Key)] = len(file.pubKeyTable) + file.pubKeyTable = append(file.pubKeyTable, piece.HostPubKey) + } + // Add the piece to the SiaFile. + file.chunks[chunkIndex].pieces[pieceIndex] = append(file.chunks[chunkIndex].pieces[pieceIndex], Piece{ + HostPubKey: piece.HostPubKey, + MerkleRoot: piece.MerkleRoot, + }) + } + } + } + return file +} + +// ExportFileData creates a FileData object from a SiaFile that can be used to +// convert the file into a legacy file. +func (sf *SiaFile) ExportFileData() FileData { + sf.mu.RLock() + defer sf.mu.RUnlock() + fd := FileData{ + Name: sf.metadata.siaPath, + FileSize: uint64(sf.metadata.fileSize), + MasterKey: sf.metadata.masterKey, + ErasureCode: sf.erasureCode, + PieceSize: sf.metadata.pieceSize, + Mode: sf.metadata.mode, + Deleted: sf.deleted, + UID: sf.uid, + } + // Return a deep-copy to avoid race conditions. 
+ fd.Chunks = make([]FileChunk, len(sf.chunks)) + for chunkIndex := range fd.Chunks { + fd.Chunks[chunkIndex].Pieces = make([][]Piece, len(sf.chunks[chunkIndex].pieces)) + for pieceIndex := range fd.Chunks[chunkIndex].Pieces { + fd.Chunks[chunkIndex].Pieces[pieceIndex] = make([]Piece, len(sf.chunks[chunkIndex].pieces[pieceIndex])) + copy(fd.Chunks[chunkIndex].Pieces[pieceIndex], sf.chunks[chunkIndex].pieces[pieceIndex]) + } + } + return fd +} diff --git a/modules/renter/siafile/metadata.go b/modules/renter/siafile/metadata.go index a7ed2f1015..c6afd88a9b 100644 --- a/modules/renter/siafile/metadata.go +++ b/modules/renter/siafile/metadata.go @@ -88,13 +88,10 @@ func (sf *SiaFile) ChunkSize() uint64 { // Delete removes the file from disk and marks it as deleted. Once the file is // deleted, certain methods should return an error. -// TODO: This will actually delete the file from disk once we change the -// persistence structure to use the new file format. -func (sf *SiaFile) Delete() error { +func (sf *SiaFile) Delete() { sf.mu.Lock() defer sf.mu.Unlock() sf.deleted = true - return nil } // Deleted indicates if this file has been deleted by the user. diff --git a/modules/renter/siafile/siafile.go b/modules/renter/siafile/siafile.go index 0864c0b057..3dbff77b45 100644 --- a/modules/renter/siafile/siafile.go +++ b/modules/renter/siafile/siafile.go @@ -163,7 +163,13 @@ func (sf *SiaFile) Pieces(chunkIndex uint64) ([][]Piece, error) { return nil, fmt.Errorf("index %v out of bounds (%v)", chunkIndex, len(sf.chunks)) } - return sf.chunks[chunkIndex].pieces, nil + // Return a deep-copy to avoid race conditions. + pieces := make([][]Piece, len(sf.chunks[chunkIndex].pieces)) + for pieceIndex := range pieces { + pieces[pieceIndex] = make([]Piece, len(sf.chunks[chunkIndex].pieces[pieceIndex])) + copy(pieces[pieceIndex], sf.chunks[chunkIndex].pieces[pieceIndex]) + } + return pieces, nil } // UID returns a unique identifier for this file. 
From 6db28acff6fefad7f3e98a53cfa0a8589a6cd108 Mon Sep 17 00:00:00 2001 From: Christopher Schinnerl Date: Wed, 20 Jun 2018 17:16:16 -0400 Subject: [PATCH 09/15] fix race --- modules/renter/uploadheap.go | 45 +++++++++++++++++++++--------------- 1 file changed, 27 insertions(+), 18 deletions(-) diff --git a/modules/renter/uploadheap.go b/modules/renter/uploadheap.go index 5e8436893a..ee0fa3c85d 100644 --- a/modules/renter/uploadheap.go +++ b/modules/renter/uploadheap.go @@ -182,12 +182,8 @@ func (r *Renter) buildUnfinishedChunks(f *siafile.SiaFile, hosts map[string]stru if !exists { build.Critical("Couldn't find public key in map. This should never happen") } - contractUtility, exists2 := r.hostContractor.ContractUtility(pk) - if exists != exists2 { - build.Critical("got a contract without utility or vice versa which shouldn't happen", - exists, exists2) - } - if !exists || !exists2 { + contractUtility, exists := r.hostContractor.ContractUtility(pk) + if !exists { // File contract does not seem to be part of the host anymore. continue } @@ -231,34 +227,47 @@ func (r *Renter) buildUnfinishedChunks(f *siafile.SiaFile, hosts map[string]stru // managedBuildChunkHeap will iterate through all of the files in the renter and // construct a chunk heap. func (r *Renter) managedBuildChunkHeap(hosts map[string]struct{}) { + // Get all the files holding the readlock. + lockID := r.mu.RLock() + files := make([]*siafile.SiaFile, 0, len(r.files)) + for _, file := range r.files { + files = append(files, file) + } + r.mu.RUnlock(lockID) + // Save host keys in map. We can't do that under the same lock since we // need to call a public method on the file. pks := make(map[string]types.SiaPublicKey) goodForRenew := make(map[string]bool) offline := make(map[string]bool) - for _, f := range r.files { + for _, f := range files { for _, pk := range f.HostPublicKeys() { pks[string(pk.Key)] = pk } } - // Loop through the whole set of files and get a list of chunks to add to - // the heap. 
- for _, file := range r.files { - for _, pk := range pks { - cu, ok := r.hostContractor.ContractUtility(pk) - if !ok { - continue - } - goodForRenew[string(pk.Key)] = ok && cu.GoodForRenew - offline[string(pk.Key)] = r.hostContractor.IsOffline(pk) + + // Build 2 maps that map every pubkey to its offline and goodForRenew + // status. + for _, pk := range pks { + cu, ok := r.hostContractor.ContractUtility(pk) + if !ok { + continue } + goodForRenew[string(pk.Key)] = ok && cu.GoodForRenew + offline[string(pk.Key)] = r.hostContractor.IsOffline(pk) + } + // Loop through the whole set of files and get a list of chunks to add to + // the heap. + for _, file := range files { + id := r.mu.Lock() unfinishedUploadChunks := r.buildUnfinishedChunks(file, hosts) + r.mu.Unlock(id) for i := 0; i < len(unfinishedUploadChunks); i++ { r.uploadHeap.managedPush(unfinishedUploadChunks[i]) } } - for _, file := range r.files { + for _, file := range files { // check for local file id := r.mu.RLock() tf, exists := r.persist.Tracking[file.SiaPath()] From 3668a0150619c2b7cc8d5d06eea89614020e96ca Mon Sep 17 00:00:00 2001 From: Christopher Schinnerl Date: Thu, 21 Jun 2018 10:18:33 -0400 Subject: [PATCH 10/15] add missing defers, replace DeepEqual and panic if index out of bounds --- modules/renter/siafile/metadata.go | 4 ++-- modules/renter/siafile/siafile.go | 11 ++++++----- 2 files changed, 8 insertions(+), 7 deletions(-) diff --git a/modules/renter/siafile/metadata.go b/modules/renter/siafile/metadata.go index c6afd88a9b..1190ba70d4 100644 --- a/modules/renter/siafile/metadata.go +++ b/modules/renter/siafile/metadata.go @@ -134,7 +134,7 @@ func (sf *SiaFile) HostPublicKeys() []types.SiaPublicKey { // MasterKey returns the masterkey used to encrypt the file. 
func (sf *SiaFile) MasterKey() crypto.TwofishKey { sf.mu.RLock() - sf.mu.RUnlock() + defer sf.mu.RUnlock() return sf.metadata.masterKey } @@ -158,7 +158,7 @@ func (sf *SiaFile) PieceSize() uint64 { // takes one argument, a map of offline contracts for this file. func (sf *SiaFile) Redundancy(offlineMap map[string]bool, goodForRenewMap map[string]bool) float64 { sf.mu.RLock() - sf.mu.RUnlock() + defer sf.mu.RUnlock() if sf.metadata.fileSize == 0 { return -1 } diff --git a/modules/renter/siafile/siafile.go b/modules/renter/siafile/siafile.go index 3dbff77b45..3261f36314 100644 --- a/modules/renter/siafile/siafile.go +++ b/modules/renter/siafile/siafile.go @@ -1,10 +1,10 @@ package siafile import ( + "bytes" "encoding/base32" "encoding/binary" "fmt" - "reflect" "sync" "github.com/NebulousLabs/Sia/modules" @@ -105,7 +105,7 @@ func (sf *SiaFile) AddPiece(pk types.SiaPublicKey, chunkIndex, pieceIndex uint64 // Get the index of the host in the public key table. tableIndex := -1 for i, hpk := range sf.pubKeyTable { - if reflect.DeepEqual(hpk, pk) { + if hpk.Algorithm == pk.Algorithm && bytes.Equal(hpk.Key, pk.Key) { tableIndex = i break } @@ -134,7 +134,7 @@ func (sf *SiaFile) AddPiece(pk types.SiaPublicKey, chunkIndex, pieceIndex uint64 // ErasureCode returns the erasure coder used by the file. func (sf *SiaFile) ErasureCode() modules.ErasureCoder { sf.mu.RLock() - sf.mu.RUnlock() + defer sf.mu.RUnlock() return sf.erasureCode } @@ -142,6 +142,8 @@ func (sf *SiaFile) ErasureCode() modules.ErasureCoder { // return the number of chunks the file consists of even if the file is not // fully uploaded yet. 
func (sf *SiaFile) NumChunks() uint64 { + sf.mu.RLock() + defer sf.mu.RUnlock() // empty files still need at least one chunk if sf.metadata.fileSize == 0 { return 1 @@ -160,8 +162,7 @@ func (sf *SiaFile) Pieces(chunkIndex uint64) ([][]Piece, error) { sf.mu.RLock() defer sf.mu.RUnlock() if chunkIndex >= uint64(len(sf.chunks)) { - return nil, fmt.Errorf("index %v out of bounds (%v)", - chunkIndex, len(sf.chunks)) + panic(fmt.Sprintf("index %v out of bounds (%v)", chunkIndex, len(sf.chunks))) } // Return a deep-copy to avoid race conditions. pieces := make([][]Piece, len(sf.chunks[chunkIndex].pieces)) From 4f34608e8b6fda43947161318c1632d8a7752b51 Mon Sep 17 00:00:00 2001 From: Christopher Schinnerl Date: Wed, 27 Jun 2018 11:31:14 -0400 Subject: [PATCH 11/15] move erasure code to chunks, add localPath and move available out of metadata.go --- modules/renter/download.go | 30 ++++-- modules/renter/downloadstreamer.go | 8 +- modules/renter/files.go | 3 +- modules/renter/files_test.go | 31 +++--- modules/renter/persist.go | 4 +- modules/renter/persist_test.go | 2 +- modules/renter/siafile/compat.go | 23 +++-- modules/renter/siafile/metadata.go | 151 +++++++-------------------- modules/renter/siafile/siafile.go | 160 ++++++++++++++++++++++++----- modules/renter/upload.go | 22 +++- modules/renter/uploadchunk.go | 4 +- modules/renter/uploadheap.go | 23 +++-- 12 files changed, 264 insertions(+), 197 deletions(-) diff --git a/modules/renter/download.go b/modules/renter/download.go index 45a85deed0..652c0c8205 100644 --- a/modules/renter/download.go +++ b/modules/renter/download.go @@ -378,8 +378,18 @@ func (r *Renter) managedNewDownload(params downloadParams) (*download, error) { } // Determine which chunks to download. 
- minChunk := params.offset / params.file.ChunkSize() - maxChunk := (params.offset + params.length - 1) / params.file.ChunkSize() + minChunk, minChunkOffset := params.file.ChunkIndexByOffset(params.offset) + maxChunk, maxChunkOffset := params.file.ChunkIndexByOffset(params.offset + params.length) + if minChunk == params.file.NumChunks() || maxChunk == params.file.NumChunks() { + return nil, errors.New("download is requesting a chunk that is past the boundary of the file") + } + // If the maxChunkOffset is exactly 0 we need to subtract 1 chunk. e.g. if + // the chunkSize is 100 bytes and we want to download 100 bytes from offset + // 0, maxChunk would be 1 and maxChunkOffset would be 0. We want maxChunk + // to be 0 though since we don't actually need any data from chunk 1. + if maxChunk > 0 && maxChunkOffset == 0 { + maxChunk-- + } // For each chunk, assemble a mapping from the contract id to the index of // the piece within the chunk that the contract is responsible for. @@ -414,13 +424,13 @@ func (r *Renter) managedNewDownload(params downloadParams) (*download, error) { for i := minChunk; i <= maxChunk; i++ { udc := &unfinishedDownloadChunk{ destination: params.destination, - erasureCode: params.file.ErasureCode(), + erasureCode: params.file.ErasureCode(i), masterKey: params.file.MasterKey(), staticChunkIndex: i, staticCacheID: fmt.Sprintf("%v:%v", d.staticSiaPath, i), staticChunkMap: chunkMaps[i-minChunk], - staticChunkSize: params.file.ChunkSize(), + staticChunkSize: params.file.ChunkSize(i), staticPieceSize: params.file.PieceSize(), // TODO: 25ms is just a guess for a good default. 
Really, we want to @@ -436,8 +446,8 @@ func (r *Renter) managedNewDownload(params downloadParams) (*download, error) { staticNeedsMemory: params.needsMemory, staticPriority: params.priority, - physicalChunkData: make([][]byte, params.file.ErasureCode().NumPieces()), - pieceUsage: make([]bool, params.file.ErasureCode().NumPieces()), + physicalChunkData: make([][]byte, params.file.ErasureCode(i).NumPieces()), + pieceUsage: make([]bool, params.file.ErasureCode(i).NumPieces()), download: d, staticStreamCache: r.staticStreamCache, @@ -446,16 +456,16 @@ func (r *Renter) managedNewDownload(params downloadParams) (*download, error) { // Set the fetchOffset - the offset within the chunk that we start // downloading from. if i == minChunk { - udc.staticFetchOffset = params.offset % params.file.ChunkSize() + udc.staticFetchOffset = minChunkOffset } else { udc.staticFetchOffset = 0 } // Set the fetchLength - the number of bytes to fetch within the chunk // that we start downloading from. - if i == maxChunk && (params.length+params.offset)%params.file.ChunkSize() != 0 { - udc.staticFetchLength = ((params.length + params.offset) % params.file.ChunkSize()) - udc.staticFetchOffset + if i == maxChunk && maxChunkOffset != 0 { + udc.staticFetchLength = maxChunkOffset - udc.staticFetchOffset } else { - udc.staticFetchLength = params.file.ChunkSize() - udc.staticFetchOffset + udc.staticFetchLength = params.file.ChunkSize(i) - udc.staticFetchOffset } // Set the writeOffset within the destination for where the data should // be written. diff --git a/modules/renter/downloadstreamer.go b/modules/renter/downloadstreamer.go index 18dca3deab..544f79a18c 100644 --- a/modules/renter/downloadstreamer.go +++ b/modules/renter/downloadstreamer.go @@ -64,10 +64,14 @@ func (s *streamer) Read(p []byte) (n int, err error) { } // Calculate how much we can download. We never download more than a single chunk. 
- chunkSize := s.file.ChunkSize() + chunkIndex, chunkOffset := s.file.ChunkIndexByOffset(uint64(s.offset)) + if chunkIndex == s.file.NumChunks() { + return 0, io.EOF + } + chunkSize := s.file.ChunkSize(chunkIndex) remainingData := uint64(fileSize - s.offset) requestedData := uint64(len(p)) - remainingChunk := chunkSize - uint64(s.offset)%chunkSize + remainingChunk := chunkSize - chunkOffset length := min(remainingData, requestedData, remainingChunk) // Download data diff --git a/modules/renter/files.go b/modules/renter/files.go index e211bd4336..be17374917 100644 --- a/modules/renter/files.go +++ b/modules/renter/files.go @@ -261,12 +261,13 @@ func (r *Renter) RenameFile(currentName, newName string) error { // fileToSiaFile converts a legacy file to a SiaFile. Fields that can't be // populated using the legacy file remain blank. -func (r *Renter) fileToSiaFile(f *file) *siafile.SiaFile { +func (r *Renter) fileToSiaFile(f *file, repairPath string) *siafile.SiaFile { fileData := siafile.FileData{ Name: f.name, FileSize: f.size, MasterKey: f.masterKey, ErasureCode: f.erasureCode, + RepairPath: repairPath, PieceSize: f.pieceSize, Mode: os.FileMode(f.mode), Deleted: f.deleted, diff --git a/modules/renter/files_test.go b/modules/renter/files_test.go index 3d762c3e05..7ee01635ca 100644 --- a/modules/renter/files_test.go +++ b/modules/renter/files_test.go @@ -7,7 +7,6 @@ import ( "github.com/NebulousLabs/Sia/crypto" "github.com/NebulousLabs/Sia/modules" - "github.com/NebulousLabs/Sia/modules/renter/siafile" "github.com/NebulousLabs/Sia/types" "github.com/NebulousLabs/errors" ) @@ -32,7 +31,7 @@ func TestFileNumChunks(t *testing.T) { for _, test := range tests { rsc, _ := NewRSCode(test.piecesPerChunk, 1) // can't use 0 - f := siafile.New(t.Name(), rsc, test.pieceSize, test.size) + f := newFile(t.Name(), rsc, test.pieceSize, test.size, 0777, "") if f.NumChunks() != test.expNumChunks { t.Errorf("Test %v: expected %v, got %v", test, test.expNumChunks, f.NumChunks()) } @@ 
-42,7 +41,7 @@ func TestFileNumChunks(t *testing.T) { // TestFileAvailable probes the available method of the file type. func TestFileAvailable(t *testing.T) { rsc, _ := NewRSCode(1, 1) // can't use 0 - f := siafile.New(t.Name(), rsc, pieceSize, 100) + f := newFile(t.Name(), rsc, pieceSize, 100, 0777, "") neverOffline := make(map[string]bool) if f.Available(neverOffline) { @@ -69,7 +68,7 @@ func TestFileAvailable(t *testing.T) { func TestFileUploadedBytes(t *testing.T) { // ensure that a piece fits within a sector rsc, _ := NewRSCode(1, 3) - f := siafile.New(t.Name(), rsc, modules.SectorSize/2, 1000) + f := newFile(t.Name(), rsc, modules.SectorSize/2, 1000, 0777, "") for i := uint64(0); i < 4; i++ { err := f.AddPiece(types.SiaPublicKey{}, uint64(0), i, crypto.Hash{}) if err != nil { @@ -85,7 +84,7 @@ func TestFileUploadedBytes(t *testing.T) { // 100%, even if more pieces have been uploaded, func TestFileUploadProgressPinning(t *testing.T) { rsc, _ := NewRSCode(1, 1) - f := siafile.New(t.Name(), rsc, 2, 4) + f := newFile(t.Name(), rsc, 2, 4, 0777, "") for i := uint64(0); i < 2; i++ { err1 := f.AddPiece(types.SiaPublicKey{Key: []byte{byte(0)}}, uint64(0), i, crypto.Hash{}) err2 := f.AddPiece(types.SiaPublicKey{Key: []byte{byte(1)}}, uint64(0), i, crypto.Hash{}) @@ -111,7 +110,7 @@ func TestFileRedundancy(t *testing.T) { for _, nData := range nDatas { rsc, _ := NewRSCode(nData, 10) - f := siafile.New(t.Name(), rsc, 100, 1000) + f := newFile(t.Name(), rsc, 100, 1000, 0777, "") // Test that an empty file has 0 redundancy. if r := f.Redundancy(neverOffline, goodForRenew); r != 0 { t.Error("expected 0 redundancy, got", r) @@ -145,33 +144,33 @@ func TestFileRedundancy(t *testing.T) { t.Fatal(err) } // 1.0 / MinPieces because the chunk with the least number of pieces has 1 piece. 
- expectedR := 1.0 / float64(f.ErasureCode().MinPieces()) + expectedR := 1.0 / float64(f.ErasureCode(0).MinPieces()) if r := f.Redundancy(neverOffline, goodForRenew); r != expectedR { t.Errorf("expected %f redundancy, got %f", expectedR, r) } // Test that adding a file contract that has erasureCode.MinPieces() pieces // per chunk for all chunks results in a file with redundancy > 1. for iChunk := uint64(0); iChunk < f.NumChunks(); iChunk++ { - for iPiece := uint64(1); iPiece < uint64(f.ErasureCode().MinPieces()); iPiece++ { + for iPiece := uint64(1); iPiece < uint64(f.ErasureCode(0).MinPieces()); iPiece++ { err := f.AddPiece(types.SiaPublicKey{Key: []byte{byte(3)}}, iChunk, iPiece, crypto.Hash{}) if err != nil { t.Fatal(err) } } - err := f.AddPiece(types.SiaPublicKey{Key: []byte{byte(4)}}, iChunk, uint64(f.ErasureCode().MinPieces()), crypto.Hash{}) + err := f.AddPiece(types.SiaPublicKey{Key: []byte{byte(4)}}, iChunk, uint64(f.ErasureCode(0).MinPieces()), crypto.Hash{}) if err != nil { t.Fatal(err) } } // 1+MinPieces / MinPieces because the chunk with the least number of pieces has 1+MinPieces pieces. - expectedR = float64(1+f.ErasureCode().MinPieces()) / float64(f.ErasureCode().MinPieces()) + expectedR = float64(1+f.ErasureCode(0).MinPieces()) / float64(f.ErasureCode(0).MinPieces()) if r := f.Redundancy(neverOffline, goodForRenew); r != expectedR { t.Errorf("expected %f redundancy, got %f", expectedR, r) } // verify offline file contracts are not counted in the redundancy for iChunk := uint64(0); iChunk < f.NumChunks(); iChunk++ { - for iPiece := uint64(0); iPiece < uint64(f.ErasureCode().MinPieces()); iPiece++ { + for iPiece := uint64(0); iPiece < uint64(f.ErasureCode(0).MinPieces()); iPiece++ { err := f.AddPiece(types.SiaPublicKey{Key: []byte{byte(5)}}, iChunk, iPiece, crypto.Hash{}) if err != nil { t.Fatal(err) @@ -191,7 +190,8 @@ func TestFileRedundancy(t *testing.T) { // TestFileExpiration probes the expiration method of the file type. 
func TestFileExpiration(t *testing.T) { - f := newTestingFile() + rsc, _ := NewRSCode(1, 2) + f := newFile(t.Name(), rsc, pieceSize, 1000, 0777, "") contracts := make(map[string]modules.RenterContract) if f.Expiration(contracts) != 0 { t.Error("file with no pieces should report as having no time remaining") @@ -245,9 +245,10 @@ func TestRenterFileListLocalPath(t *testing.T) { defer rt.Close() id := rt.renter.mu.Lock() f := newTestingFile() + f.SetLocalPath("TestPath") rt.renter.files[f.SiaPath()] = f rt.renter.persist.Tracking[f.SiaPath()] = trackedFile{ - RepairPath: "TestPath", + RepairPath: f.LocalPath(), } rt.renter.mu.Unlock(id) files := rt.renter.FileList() @@ -414,7 +415,9 @@ func TestRenterRenameFile(t *testing.T) { } // Renaming should also update the tracking set - rt.renter.persist.Tracking["1"] = trackedFile{"foo"} + rt.renter.persist.Tracking["1"] = trackedFile{ + RepairPath: f2.LocalPath(), + } err = rt.renter.RenameFile("1", "1b") if err != nil { t.Fatal(err) diff --git a/modules/renter/persist.go b/modules/renter/persist.go index 56761fac16..576b0253c5 100644 --- a/modules/renter/persist.go +++ b/modules/renter/persist.go @@ -421,12 +421,12 @@ func (r *Renter) loadSharedFiles(reader io.Reader) ([]string, error) { // Add files to renter. names := make([]string, numFiles) for i, f := range files { - r.files[f.name] = r.fileToSiaFile(f) + r.files[f.name] = r.fileToSiaFile(f, r.persist.Tracking[f.name].RepairPath) names[i] = f.name } // Save the files. 
for _, f := range files { - r.saveFile(r.fileToSiaFile(f)) + r.saveFile(r.fileToSiaFile(f, r.persist.Tracking[f.name].RepairPath)) } return names, nil diff --git a/modules/renter/persist_test.go b/modules/renter/persist_test.go index 9c8b7ea0ee..504875ad67 100644 --- a/modules/renter/persist_test.go +++ b/modules/renter/persist_test.go @@ -23,7 +23,7 @@ func newTestingFile() *siafile.SiaFile { name := "testfile-" + strconv.Itoa(int(data[0])) - return siafile.New(name, rsc, pieceSize, 1000) + return newFile(name, rsc, pieceSize, 1000, 0777, "") } // equalFiles is a helper function that compares two files for equality. diff --git a/modules/renter/siafile/compat.go b/modules/renter/siafile/compat.go index 5f046ce22a..62cbd43fb5 100644 --- a/modules/renter/siafile/compat.go +++ b/modules/renter/siafile/compat.go @@ -17,6 +17,7 @@ type ( FileSize uint64 MasterKey crypto.TwofishKey ErasureCode modules.ErasureCoder + RepairPath string PieceSize uint64 Mode os.FileMode Deleted bool @@ -40,18 +41,17 @@ func NewFromFileData(fd FileData) *SiaFile { pieceSize: fd.PieceSize, siaPath: fd.Name, }, - deleted: fd.Deleted, - erasureCode: fd.ErasureCode, - uid: fd.UID, + deleted: fd.Deleted, + uid: fd.UID, } - chunks := make([]Chunk, file.NumChunks()) - for i := range chunks { - chunks[i].erasureCodeType = [4]byte{0, 0, 0, 1} - binary.LittleEndian.PutUint32(chunks[i].erasureCodeParams[0:4], uint32(file.erasureCode.MinPieces())) - binary.LittleEndian.PutUint32(chunks[i].erasureCodeParams[4:8], uint32(file.erasureCode.NumPieces()-file.erasureCode.MinPieces())) - chunks[i].pieces = make([][]Piece, file.erasureCode.NumPieces()) + file.chunks = make([]Chunk, len(fd.Chunks)) + for i := range file.chunks { + file.chunks[i].erasureCode = fd.ErasureCode + file.chunks[i].erasureCodeType = [4]byte{0, 0, 0, 1} + binary.LittleEndian.PutUint32(file.chunks[i].erasureCodeParams[0:4], uint32(file.chunks[i].erasureCode.MinPieces())) + 
binary.LittleEndian.PutUint32(file.chunks[i].erasureCodeParams[4:8], uint32(file.chunks[i].erasureCode.NumPieces()-file.chunks[i].erasureCode.MinPieces())) + file.chunks[i].pieces = make([][]Piece, file.chunks[i].erasureCode.NumPieces()) } - file.chunks = chunks // Populate the pubKeyTable of the file and add the pieces. pubKeyMap := make(map[string]int) @@ -83,7 +83,8 @@ func (sf *SiaFile) ExportFileData() FileData { Name: sf.metadata.siaPath, FileSize: uint64(sf.metadata.fileSize), MasterKey: sf.metadata.masterKey, - ErasureCode: sf.erasureCode, + ErasureCode: sf.chunks[0].erasureCode, + RepairPath: sf.metadata.localPath, PieceSize: sf.metadata.pieceSize, Mode: sf.metadata.mode, Deleted: sf.deleted, diff --git a/modules/renter/siafile/metadata.go b/modules/renter/siafile/metadata.go index 1190ba70d4..bc80df911d 100644 --- a/modules/renter/siafile/metadata.go +++ b/modules/renter/siafile/metadata.go @@ -5,7 +5,6 @@ import ( "os" "time" - "github.com/NebulousLabs/Sia/build" "github.com/NebulousLabs/Sia/crypto" "github.com/NebulousLabs/Sia/modules" "github.com/NebulousLabs/Sia/types" @@ -14,12 +13,12 @@ import ( type ( // Metadata is the metadata of a SiaFile and is JSON encoded. Metadata struct { - version [16]byte // version of the sia file format used - fileSize int64 // total size of the file - masterKey crypto.TwofishKey // masterkey used to encrypt pieces - pieceSize uint64 // size of a single piece of the file - trackingPath string // file to the local copy of the file used for repairing - siaPath string + version [16]byte // version of the sia file format used + fileSize int64 // total size of the file + masterKey crypto.TwofishKey // masterkey used to encrypt pieces + pieceSize uint64 // size of a single piece of the file + localPath string // file to the local copy of the file used for repairing + siaPath string // the path of the file on the Sia network // The following fields are the usual unix timestamps of files. 
modTime time.Time // time of last content modification @@ -32,10 +31,10 @@ type ( uid int // id of the user who owns the file gid int // id of the group that owns the file - // chunkHeaderSize is the size of each of the following chunk's metadata. - chunkHeaderSize uint64 - // chunkBodySize is the size of each of the following chunk's bodies. - chunkBodySize uint64 + // chunkMetadataSize is the amount of space allocated within the + // siafile for the metadata of a single chunk. It allows us to do + // random access operations on the file in constant time. + chunkMetadataSize uint64 // The following fields are the offsets for data that is written to disk // after the pubKeyTable. We reserve a generous amount of space for the @@ -43,9 +42,9 @@ type ( // need to resize later on. // // chunkOffset is the offset of the first chunk, forced to be a factor of - // 4096, default 16kib + // 4096, default 4kib // - // pubKeyTableOffset is the office of the publicKeyTable within the + // pubKeyTableOffset is the offset of the publicKeyTable within the // file. // chunkOffset int64 @@ -53,37 +52,11 @@ type ( } ) -// Available indicates whether the file is ready to be downloaded. -func (sf *SiaFile) Available(offline map[string]bool) bool { - sf.mu.RLock() - defer sf.mu.RUnlock() - // We need to find at least erasureCode.MinPieces different pieces for each - // chunk for the file to be available. - for _, chunk := range sf.chunks { - piecesForChunk := 0 - for _, pieceSet := range chunk.pieces { - for _, piece := range pieceSet { - if !offline[string(piece.HostPubKey.Key)] { - piecesForChunk++ - break // break out since we only count unique pieces - } - } - if piecesForChunk >= sf.erasureCode.MinPieces() { - break // we already have enough pieces for this chunk. - } - } - if piecesForChunk < sf.erasureCode.MinPieces() { - return false // this chunk isn't available. - } - } - return true -} - // ChunkSize returns the size of a single chunk of the file. 
-func (sf *SiaFile) ChunkSize() uint64 { +func (sf *SiaFile) ChunkSize(chunkIndex uint64) uint64 { sf.mu.RLock() defer sf.mu.RUnlock() - return sf.chunkSize() + return sf.chunkSize(chunkIndex) } // Delete removes the file from disk and marks it as deleted. Once the file is @@ -131,6 +104,13 @@ func (sf *SiaFile) HostPublicKeys() []types.SiaPublicKey { return sf.pubKeyTable } +// LocalPath returns the path of the local data of the file. +func (sf *SiaFile) LocalPath() string { + sf.mu.RLock() + defer sf.mu.RUnlock() + return sf.metadata.localPath +} + // MasterKey returns the masterkey used to encrypt the file. func (sf *SiaFile) MasterKey() crypto.TwofishKey { sf.mu.RLock() @@ -152,78 +132,6 @@ func (sf *SiaFile) PieceSize() uint64 { return sf.metadata.pieceSize } -// Redundancy returns the redundancy of the least redundant chunk. A file -// becomes available when this redundancy is >= 1. Assumes that every piece is -// unique within a file contract. -1 is returned if the file has size 0. It -// takes one argument, a map of offline contracts for this file. -func (sf *SiaFile) Redundancy(offlineMap map[string]bool, goodForRenewMap map[string]bool) float64 { - sf.mu.RLock() - defer sf.mu.RUnlock() - if sf.metadata.fileSize == 0 { - return -1 - } - - minPiecesRenew := ^uint64(0) - minPiecesNoRenew := ^uint64(0) - for _, chunk := range sf.chunks { - // Loop over chunks and remember how many unique pieces of the chunk - // were goodForRenew and how many were not. - numPiecesRenew := uint64(0) - numPiecesNoRenew := uint64(0) - for _, pieceSet := range chunk.pieces { - // Remember if we encountered a goodForRenew piece or a - // !goodForRenew piece that was at least online. 
- foundGoodForRenew := false - foundOnline := false - for _, piece := range pieceSet { - offline, exists1 := offlineMap[string(piece.HostPubKey.Key)] - goodForRenew, exists2 := goodForRenewMap[string(piece.HostPubKey.Key)] - if exists1 != exists2 { - build.Critical("contract can't be in one map but not in the other") - } - if !exists1 || offline { - continue - } - // If we found a goodForRenew piece we can stop. - if goodForRenew { - foundGoodForRenew = true - break - } - // Otherwise we continue since there might be other hosts with - // the same piece that are goodForRenew. We still remember that - // we found an online piece though. - foundOnline = true - } - if foundGoodForRenew { - numPiecesRenew++ - numPiecesNoRenew++ - } else if foundOnline { - numPiecesNoRenew++ - } - } - // Remember the smallest number of goodForRenew pieces encountered. - if numPiecesRenew < minPiecesRenew { - minPiecesRenew = numPiecesRenew - } - // Remember the smallest number of !goodForRenew pieces encountered. - if numPiecesNoRenew < minPiecesNoRenew { - minPiecesNoRenew = numPiecesNoRenew - } - } - - // If the redundancy is smaller than 1x we return the redundancy that - // includes contracts that are not good for renewal. The reason for this is - // a better user experience. If the renter operates correctly, redundancy - // should never go above numPieces / minPieces and redundancyNoRenew should - // never go below 1. - redundancy := float64(minPiecesRenew) / float64(sf.erasureCode.MinPieces()) - redundancyNoRenew := float64(minPiecesNoRenew) / float64(sf.erasureCode.MinPieces()) - if redundancy < 1 { - return redundancyNoRenew - } - return redundancy -} - // Rename changes the name of the file to a new one. // TODO: This will actually rename the file on disk once we persist the new // file format. 
@@ -241,6 +149,14 @@ func (sf *SiaFile) SetMode(mode os.FileMode) { sf.metadata.mode = mode } +// SetLocalPath changes the local path of the file which is used to repair +// the file from disk. +func (sf *SiaFile) SetLocalPath(path string) { + sf.mu.Lock() + defer sf.mu.Unlock() + sf.metadata.localPath = path +} + // SiaPath returns the file's sia path. func (sf *SiaFile) SiaPath() string { sf.mu.RLock() @@ -278,11 +194,14 @@ func (sf *SiaFile) UploadedBytes() uint64 { // reaches 100%, and UploadProgress may report a value greater than 100%. func (sf *SiaFile) UploadProgress() float64 { uploaded := sf.UploadedBytes() - desired := modules.SectorSize * uint64(sf.ErasureCode().NumPieces()) * sf.NumChunks() + var desired uint64 + for i := uint64(0); i < sf.NumChunks(); i++ { + desired += modules.SectorSize * uint64(sf.ErasureCode(i).NumPieces()) + } return math.Min(100*(float64(uploaded)/float64(desired)), 100) } // ChunkSize returns the size of a single chunk of the file. -func (sf *SiaFile) chunkSize() uint64 { - return sf.metadata.pieceSize * uint64(sf.erasureCode.MinPieces()) +func (sf *SiaFile) chunkSize(chunkIndex uint64) uint64 { + return sf.metadata.pieceSize * uint64(sf.chunks[chunkIndex].erasureCode.MinPieces()) } diff --git a/modules/renter/siafile/siafile.go b/modules/renter/siafile/siafile.go index 3261f36314..f79185b239 100644 --- a/modules/renter/siafile/siafile.go +++ b/modules/renter/siafile/siafile.go @@ -2,11 +2,12 @@ package siafile import ( "bytes" - "encoding/base32" "encoding/binary" "fmt" + "os" "sync" + "github.com/NebulousLabs/Sia/build" "github.com/NebulousLabs/Sia/modules" "github.com/NebulousLabs/Sia/types" "github.com/NebulousLabs/fastrand" @@ -35,10 +36,9 @@ type ( chunks []Chunk // utility fields. These are not persisted. 
- deleted bool - erasureCode modules.ErasureCoder - mu sync.RWMutex - uid string + deleted bool + mu sync.RWMutex + uid string } // Chunk represents a single chunk of a file on disk @@ -56,6 +56,7 @@ type ( // erasureCodeType [4]byte erasureCodeParams [8]byte + erasureCode modules.ErasureCoder // extensionInfo is some reserved space for each chunk that allows us // to indicate if a chunk is special. @@ -74,30 +75,33 @@ type ( ) // New create a new SiaFile. -func New(siaPath string, erasureCode modules.ErasureCoder, pieceSize, fileSize uint64) *SiaFile { +// TODO needs changes once we move persistence over. +func New(siaPath string, erasureCode []modules.ErasureCoder, pieceSize, fileSize uint64, fileMode os.FileMode, source string) *SiaFile { file := &SiaFile{ metadata: Metadata{ fileSize: int64(fileSize), + localPath: source, masterKey: crypto.GenerateTwofishKey(), + mode: fileMode, pieceSize: pieceSize, siaPath: siaPath, }, - erasureCode: erasureCode, - uid: base32.StdEncoding.EncodeToString(fastrand.Bytes(20))[:20], + uid: string(fastrand.Bytes(20)), } - chunks := make([]Chunk, file.NumChunks()) - for i := range chunks { - chunks[i].erasureCodeType = [4]byte{0, 0, 0, 1} - binary.LittleEndian.PutUint32(chunks[i].erasureCodeParams[0:4], uint32(erasureCode.MinPieces())) - binary.LittleEndian.PutUint32(chunks[i].erasureCodeParams[4:8], uint32(erasureCode.NumPieces()-erasureCode.MinPieces())) - chunks[i].pieces = make([][]Piece, erasureCode.NumPieces()) + file.chunks = make([]Chunk, len(erasureCode)) + for i := range file.chunks { + file.chunks[i].erasureCode = erasureCode[i] + file.chunks[i].erasureCodeType = [4]byte{0, 0, 0, 1} + binary.LittleEndian.PutUint32(file.chunks[i].erasureCodeParams[0:4], uint32(erasureCode[i].MinPieces())) + binary.LittleEndian.PutUint32(file.chunks[i].erasureCodeParams[4:8], uint32(erasureCode[i].NumPieces()-erasureCode[i].MinPieces())) + file.chunks[i].pieces = make([][]Piece, erasureCode[i].NumPieces()) } - file.chunks = chunks return 
file } // AddPiece adds an uploaded piece to the file. It also updates the host table // if the public key of the host is not aleady known. +// TODO needs changes once we move persistence over. func (sf *SiaFile) AddPiece(pk types.SiaPublicKey, chunkIndex, pieceIndex uint64, merkleRoot crypto.Hash) error { sf.mu.Lock() defer sf.mu.Unlock() @@ -131,11 +135,50 @@ func (sf *SiaFile) AddPiece(pk types.SiaPublicKey, chunkIndex, pieceIndex uint64 return nil } +// Available indicates whether the file is ready to be downloaded. +func (sf *SiaFile) Available(offline map[string]bool) bool { + sf.mu.RLock() + defer sf.mu.RUnlock() + // We need to find at least erasureCode.MinPieces different pieces for each + // chunk for the file to be available. + for chunkIndex, chunk := range sf.chunks { + piecesForChunk := 0 + for _, pieceSet := range chunk.pieces { + for _, piece := range pieceSet { + if !offline[string(piece.HostPubKey.Key)] { + piecesForChunk++ + break // break out since we only count unique pieces + } + } + if piecesForChunk >= sf.chunks[chunkIndex].erasureCode.MinPieces() { + break // we already have enough pieces for this chunk. + } + } + if piecesForChunk < sf.chunks[chunkIndex].erasureCode.MinPieces() { + return false // this chunk isn't available. + } + } + return true +} + +// ChunkIndexByOffset will return the chunkIndex that contains the provided +// offset of a file and also the relative offset within the chunk. If the +// offset is out of bounds, chunkIndex will be equal to NumChunk(). +func (sf *SiaFile) ChunkIndexByOffset(offset uint64) (chunkIndex uint64, off uint64) { + for chunkIndex := uint64(0); chunkIndex < uint64(len(sf.chunks)); chunkIndex++ { + if sf.chunkSize(chunkIndex) > offset { + return chunkIndex, offset + } + offset -= sf.chunkSize(chunkIndex) + } + return +} + // ErasureCode returns the erasure coder used by the file. 
-func (sf *SiaFile) ErasureCode() modules.ErasureCoder { +func (sf *SiaFile) ErasureCode(chunkIndex uint64) modules.ErasureCoder { sf.mu.RLock() defer sf.mu.RUnlock() - return sf.erasureCode + return sf.chunks[chunkIndex].erasureCode } // NumChunks returns the number of chunks the file consists of. This will @@ -144,16 +187,7 @@ func (sf *SiaFile) ErasureCode() modules.ErasureCoder { func (sf *SiaFile) NumChunks() uint64 { sf.mu.RLock() defer sf.mu.RUnlock() - // empty files still need at least one chunk - if sf.metadata.fileSize == 0 { - return 1 - } - n := uint64(sf.metadata.fileSize) / sf.chunkSize() - // last chunk will be padded, unless chunkSize divides file evenly. - if uint64(sf.metadata.fileSize)%sf.chunkSize() != 0 { - n++ - } - return n + return uint64(len(sf.chunks)) } // Pieces returns all the pieces for a chunk in a slice of slices that contains @@ -173,6 +207,78 @@ func (sf *SiaFile) Pieces(chunkIndex uint64) ([][]Piece, error) { return pieces, nil } +// Redundancy returns the redundancy of the least redundant chunk. A file +// becomes available when this redundancy is >= 1. Assumes that every piece is +// unique within a file contract. -1 is returned if the file has size 0. It +// takes one argument, a map of offline contracts for this file. +func (sf *SiaFile) Redundancy(offlineMap map[string]bool, goodForRenewMap map[string]bool) float64 { + sf.mu.RLock() + defer sf.mu.RUnlock() + if sf.metadata.fileSize == 0 { + return -1 + } + + minPiecesRenew := ^uint64(0) + minPiecesNoRenew := ^uint64(0) + for _, chunk := range sf.chunks { + // Loop over chunks and remember how many unique pieces of the chunk + // were goodForRenew and how many were not. + numPiecesRenew := uint64(0) + numPiecesNoRenew := uint64(0) + for _, pieceSet := range chunk.pieces { + // Remember if we encountered a goodForRenew piece or a + // !goodForRenew piece that was at least online. 
+ foundGoodForRenew := false + foundOnline := false + for _, piece := range pieceSet { + offline, exists1 := offlineMap[string(piece.HostPubKey.Key)] + goodForRenew, exists2 := goodForRenewMap[string(piece.HostPubKey.Key)] + if exists1 != exists2 { + build.Critical("contract can't be in one map but not in the other") + } + if !exists1 || offline { + continue + } + // If we found a goodForRenew piece we can stop. + if goodForRenew { + foundGoodForRenew = true + break + } + // Otherwise we continue since there might be other hosts with + // the same piece that are goodForRenew. We still remember that + // we found an online piece though. + foundOnline = true + } + if foundGoodForRenew { + numPiecesRenew++ + numPiecesNoRenew++ + } else if foundOnline { + numPiecesNoRenew++ + } + } + // Remember the smallest number of goodForRenew pieces encountered. + if numPiecesRenew < minPiecesRenew { + minPiecesRenew = numPiecesRenew + } + // Remember the smallest number of !goodForRenew pieces encountered. + if numPiecesNoRenew < minPiecesNoRenew { + minPiecesNoRenew = numPiecesNoRenew + } + } + + // If the redundancy is smaller than 1x we return the redundancy that + // includes contracts that are not good for renewal. The reason for this is + // a better user experience. If the renter operates correctly, redundancy + // should never go above numPieces / minPieces and redundancyNoRenew should + // never go below 1. + redundancy := float64(minPiecesRenew) / float64(sf.chunks[0].erasureCode.MinPieces()) // TODO this shouldn't be chunks[0] + redundancyNoRenew := float64(minPiecesNoRenew) / float64(sf.chunks[0].erasureCode.MinPieces()) //TODO this shouldn't be chunks[0] + if redundancy < 1 { + return redundancyNoRenew + } + return redundancy +} + // UID returns a unique identifier for this file. 
func (sf *SiaFile) UID() string { sf.mu.RLock() diff --git a/modules/renter/upload.go b/modules/renter/upload.go index 576c28e238..6bf3d51985 100644 --- a/modules/renter/upload.go +++ b/modules/renter/upload.go @@ -28,6 +28,23 @@ var ( errUploadDirectory = errors.New("cannot upload directory") ) +// newFile is a helper to more easily create a new Siafile for testing. +func newFile(name string, rsc modules.ErasureCoder, pieceSize, fileSize uint64, mode os.FileMode, source string) *siafile.SiaFile { + numChunks := 1 + chunkSize := pieceSize * uint64(rsc.MinPieces()) + if fileSize > 0 { + numChunks = int(fileSize / chunkSize) + if fileSize%chunkSize != 0 { + numChunks++ + } + } + ecs := make([]modules.ErasureCoder, numChunks) + for i := 0; i < numChunks; i++ { + ecs[i] = rsc + } + return siafile.New(name, ecs, pieceSize, fileSize, mode, source) +} + // validateSource verifies that a sourcePath meets the // requirements for upload. func validateSource(sourcePath string) error { @@ -82,14 +99,13 @@ func (r *Renter) Upload(up modules.FileUploadParams) error { } // Create file object. - f := siafile.New(up.SiaPath, up.ErasureCode, pieceSize, uint64(fileInfo.Size())) - f.SetMode(fileInfo.Mode()) + f := newFile(up.SiaPath, up.ErasureCode, pieceSize, uint64(fileInfo.Size()), fileInfo.Mode(), up.Source) // Add file to renter. lockID = r.mu.Lock() r.files[up.SiaPath] = f r.persist.Tracking[up.SiaPath] = trackedFile{ - RepairPath: up.Source, + RepairPath: f.LocalPath(), } r.saveSync() err = r.saveFile(f) diff --git a/modules/renter/uploadchunk.go b/modules/renter/uploadchunk.go index 5daed93ad4..98e640128d 100644 --- a/modules/renter/uploadchunk.go +++ b/modules/renter/uploadchunk.go @@ -177,7 +177,7 @@ func (r *Renter) managedDownloadLogicalChunkData(chunk *unfinishedUploadChunk) e func (r *Renter) managedFetchAndRepairChunk(chunk *unfinishedUploadChunk) { // Calculate the amount of memory needed for erasure coding. 
This will need // to be released if there's an error before erasure coding is complete. - erasureCodingMemory := chunk.renterFile.PieceSize() * uint64(chunk.renterFile.ErasureCode().MinPieces()) + erasureCodingMemory := chunk.renterFile.PieceSize() * uint64(chunk.renterFile.ErasureCode(chunk.index).MinPieces()) // Calculate the amount of memory to release due to already completed // pieces. This memory gets released during encryption, but needs to be @@ -222,7 +222,7 @@ func (r *Renter) managedFetchAndRepairChunk(chunk *unfinishedUploadChunk) { // fact to reduce the total memory required to create the physical data. // That will also change the amount of memory we need to allocate, and the // number of times we need to return memory. - chunk.physicalChunkData, err = chunk.renterFile.ErasureCode().EncodeShards(chunk.logicalChunkData) + chunk.physicalChunkData, err = chunk.renterFile.ErasureCode(chunk.index).EncodeShards(chunk.logicalChunkData) chunk.logicalChunkData = nil r.memoryManager.Return(erasureCodingMemory) chunk.memoryReleased += erasureCodingMemory diff --git a/modules/renter/uploadheap.go b/modules/renter/uploadheap.go index ee0fa3c85d..1d8915bf2f 100644 --- a/modules/renter/uploadheap.go +++ b/modules/renter/uploadheap.go @@ -111,7 +111,14 @@ func (r *Renter) buildUnfinishedChunks(f *siafile.SiaFile, hosts map[string]stru } // If we don't have enough workers for the file, don't repair it right now. 
- if len(r.workerPool) < f.ErasureCode().MinPieces() { + minWorkers := 0 + for i := uint64(0); i < f.NumChunks(); i++ { + minPieces := f.ErasureCode(i).MinPieces() + if minPieces > minWorkers { + minWorkers = minPieces + } + } + if len(r.workerPool) < minWorkers { return nil } @@ -133,8 +140,8 @@ func (r *Renter) buildUnfinishedChunks(f *siafile.SiaFile, hosts map[string]stru }, index: i, - length: f.ChunkSize(), - offset: int64(i * f.ChunkSize()), + length: f.ChunkSize(i), + offset: int64(i * f.ChunkSize(i)), // memoryNeeded has to also include the logical data, and also // include the overhead for encryption. @@ -145,13 +152,13 @@ func (r *Renter) buildUnfinishedChunks(f *siafile.SiaFile, hosts map[string]stru // TODO: Currently we request memory for all of the pieces as well // as the minimum pieces, but we perhaps don't need to request all // of that. - memoryNeeded: f.PieceSize()*uint64(f.ErasureCode().NumPieces()+f.ErasureCode().MinPieces()) + uint64(f.ErasureCode().NumPieces()*crypto.TwofishOverhead), - minimumPieces: f.ErasureCode().MinPieces(), - piecesNeeded: f.ErasureCode().NumPieces(), + memoryNeeded: f.PieceSize()*uint64(f.ErasureCode(i).NumPieces()+f.ErasureCode(i).MinPieces()) + uint64(f.ErasureCode(i).NumPieces()*crypto.TwofishOverhead), + minimumPieces: f.ErasureCode(i).MinPieces(), + piecesNeeded: f.ErasureCode(i).NumPieces(), - physicalChunkData: make([][]byte, f.ErasureCode().NumPieces()), + physicalChunkData: make([][]byte, f.ErasureCode(i).NumPieces()), - pieceUsage: make([]bool, f.ErasureCode().NumPieces()), + pieceUsage: make([]bool, f.ErasureCode(i).NumPieces()), unusedHosts: make(map[string]struct{}), } // Every chunk can have a different set of unused hosts. 
From 461c9ab04c9536a0d716d1bcb6f46c555157429d Mon Sep 17 00:00:00 2001 From: Christopher Schinnerl Date: Wed, 27 Jun 2018 13:08:10 -0400 Subject: [PATCH 12/15] add more logging to WaitForUploadRedundancy --- siatest/renter.go | 16 +++++++++++++++- 1 file changed, 15 insertions(+), 1 deletion(-) diff --git a/siatest/renter.go b/siatest/renter.go index 0eac98b198..3601ef6d4b 100644 --- a/siatest/renter.go +++ b/siatest/renter.go @@ -270,7 +270,7 @@ func (tn *TestNode) WaitForUploadRedundancy(rf *RemoteFile, redundancy float64) return errors.New("file is not tracked by renter") } // Wait until it reaches the redundancy - return Retry(600, 100*time.Millisecond, func() error { + err := Retry(600, 100*time.Millisecond, func() error { file, err := tn.FileInfo(rf) if err != nil { return errors.AddContext(err, "couldn't retrieve FileInfo") @@ -280,6 +280,20 @@ func (tn *TestNode) WaitForUploadRedundancy(rf *RemoteFile, redundancy float64) } return nil }) + if err != nil { + rc, err2 := tn.RenterContractsGet() + if err2 != nil { + return errors.Compose(err, err2) + } + goodHosts := 0 + for _, contract := range rc.Contracts { + if contract.GoodForUpload { + goodHosts++ + } + } + return errors.Compose(err, fmt.Errorf("%v available hosts", goodHosts)) + } + return nil } // WaitForDecreasingRedundancy waits until the redundancy decreases to a From ef1f3fcebe100642a8cc64a13113a45722ef7f66 Mon Sep 17 00:00:00 2001 From: Christopher Schinnerl Date: Tue, 3 Jul 2018 17:35:31 -0400 Subject: [PATCH 13/15] change some fields to static --- modules/renter/siafile/compat.go | 56 +++++++-------- modules/renter/siafile/metadata.go | 50 ++++++------- modules/renter/siafile/siafile.go | 108 ++++++++++++++--------------- 3 files changed, 101 insertions(+), 113 deletions(-) diff --git a/modules/renter/siafile/compat.go b/modules/renter/siafile/compat.go index 62cbd43fb5..2426816920 100644 --- a/modules/renter/siafile/compat.go +++ b/modules/renter/siafile/compat.go @@ -34,23 +34,23 @@ type ( // 
previously created from a legacy file. func NewFromFileData(fd FileData) *SiaFile { file := &SiaFile{ - metadata: Metadata{ - fileSize: int64(fd.FileSize), - masterKey: fd.MasterKey, - mode: fd.Mode, - pieceSize: fd.PieceSize, - siaPath: fd.Name, + staticMetadata: Metadata{ + staticFileSize: int64(fd.FileSize), + staticMasterKey: fd.MasterKey, + mode: fd.Mode, + staticPieceSize: fd.PieceSize, + siaPath: fd.Name, }, - deleted: fd.Deleted, - uid: fd.UID, + deleted: fd.Deleted, + staticUID: fd.UID, } - file.chunks = make([]Chunk, len(fd.Chunks)) - for i := range file.chunks { - file.chunks[i].erasureCode = fd.ErasureCode - file.chunks[i].erasureCodeType = [4]byte{0, 0, 0, 1} - binary.LittleEndian.PutUint32(file.chunks[i].erasureCodeParams[0:4], uint32(file.chunks[i].erasureCode.MinPieces())) - binary.LittleEndian.PutUint32(file.chunks[i].erasureCodeParams[4:8], uint32(file.chunks[i].erasureCode.NumPieces()-file.chunks[i].erasureCode.MinPieces())) - file.chunks[i].pieces = make([][]Piece, file.chunks[i].erasureCode.NumPieces()) + file.staticChunks = make([]Chunk, len(fd.Chunks)) + for i := range file.staticChunks { + file.staticChunks[i].staticErasureCode = fd.ErasureCode + file.staticChunks[i].staticErasureCodeType = [4]byte{0, 0, 0, 1} + binary.LittleEndian.PutUint32(file.staticChunks[i].staticErasureCodeParams[0:4], uint32(file.staticChunks[i].staticErasureCode.MinPieces())) + binary.LittleEndian.PutUint32(file.staticChunks[i].staticErasureCodeParams[4:8], uint32(file.staticChunks[i].staticErasureCode.NumPieces()-file.staticChunks[i].staticErasureCode.MinPieces())) + file.staticChunks[i].pieces = make([][]Piece, file.staticChunks[i].staticErasureCode.NumPieces()) } // Populate the pubKeyTable of the file and add the pieces. @@ -64,7 +64,7 @@ func NewFromFileData(fd FileData) *SiaFile { file.pubKeyTable = append(file.pubKeyTable, piece.HostPubKey) } // Add the piece to the SiaFile. 
- file.chunks[chunkIndex].pieces[pieceIndex] = append(file.chunks[chunkIndex].pieces[pieceIndex], Piece{ + file.staticChunks[chunkIndex].pieces[pieceIndex] = append(file.staticChunks[chunkIndex].pieces[pieceIndex], Piece{ HostPubKey: piece.HostPubKey, MerkleRoot: piece.MerkleRoot, }) @@ -80,23 +80,23 @@ func (sf *SiaFile) ExportFileData() FileData { sf.mu.RLock() defer sf.mu.RUnlock() fd := FileData{ - Name: sf.metadata.siaPath, - FileSize: uint64(sf.metadata.fileSize), - MasterKey: sf.metadata.masterKey, - ErasureCode: sf.chunks[0].erasureCode, - RepairPath: sf.metadata.localPath, - PieceSize: sf.metadata.pieceSize, - Mode: sf.metadata.mode, + Name: sf.staticMetadata.siaPath, + FileSize: uint64(sf.staticMetadata.staticFileSize), + MasterKey: sf.staticMetadata.staticMasterKey, + ErasureCode: sf.staticChunks[0].staticErasureCode, + RepairPath: sf.staticMetadata.localPath, + PieceSize: sf.staticMetadata.staticPieceSize, + Mode: sf.staticMetadata.mode, Deleted: sf.deleted, - UID: sf.uid, + UID: sf.staticUID, } // Return a deep-copy to avoid race conditions. 
- fd.Chunks = make([]FileChunk, len(sf.chunks)) + fd.Chunks = make([]FileChunk, len(sf.staticChunks)) for chunkIndex := range fd.Chunks { - fd.Chunks[chunkIndex].Pieces = make([][]Piece, len(sf.chunks[chunkIndex].pieces)) + fd.Chunks[chunkIndex].Pieces = make([][]Piece, len(sf.staticChunks[chunkIndex].pieces)) for pieceIndex := range fd.Chunks[chunkIndex].Pieces { - fd.Chunks[chunkIndex].Pieces[pieceIndex] = make([]Piece, len(sf.chunks[chunkIndex].pieces[pieceIndex])) - copy(fd.Chunks[chunkIndex].Pieces[pieceIndex], sf.chunks[chunkIndex].pieces[pieceIndex]) + fd.Chunks[chunkIndex].Pieces[pieceIndex] = make([]Piece, len(sf.staticChunks[chunkIndex].pieces[pieceIndex])) + copy(fd.Chunks[chunkIndex].Pieces[pieceIndex], sf.staticChunks[chunkIndex].pieces[pieceIndex]) } } return fd diff --git a/modules/renter/siafile/metadata.go b/modules/renter/siafile/metadata.go index bc80df911d..4d2c6137f5 100644 --- a/modules/renter/siafile/metadata.go +++ b/modules/renter/siafile/metadata.go @@ -13,12 +13,12 @@ import ( type ( // Metadata is the metadata of a SiaFile and is JSON encoded. Metadata struct { - version [16]byte // version of the sia file format used - fileSize int64 // total size of the file - masterKey crypto.TwofishKey // masterkey used to encrypt pieces - pieceSize uint64 // size of a single piece of the file - localPath string // file to the local copy of the file used for repairing - siaPath string // the path of the file on the Sia network + staticVersion [16]byte // version of the sia file format used + staticFileSize int64 // total size of the file + staticMasterKey crypto.TwofishKey // masterkey used to encrypt pieces + staticPieceSize uint64 // size of a single piece of the file + localPath string // file to the local copy of the file used for repairing + siaPath string // the path of the file on the Sia network // The following fields are the usual unix timestamps of files. 
modTime time.Time // time of last content modification @@ -31,10 +31,10 @@ type ( uid int // id of the user who owns the file gid int // id of the group that owns the file - // chunkMetadataSize is the amount of space allocated within the + // staticChunkMetadataSize is the amount of space allocated within the // siafile for the metadata of a single chunk. It allows us to do // random access operations on the file in constant time. - chunkMetadataSize uint64 + staticChunkMetadataSize uint64 // The following fields are the offsets for data that is written to disk // after the pubKeyTable. We reserve a generous amount of space for the @@ -54,9 +54,7 @@ type ( // ChunkSize returns the size of a single chunk of the file. func (sf *SiaFile) ChunkSize(chunkIndex uint64) uint64 { - sf.mu.RLock() - defer sf.mu.RUnlock() - return sf.chunkSize(chunkIndex) + return sf.staticChunkSize(chunkIndex) } // Delete removes the file from disk and marks it as deleted. Once the file is @@ -108,28 +106,24 @@ func (sf *SiaFile) HostPublicKeys() []types.SiaPublicKey { func (sf *SiaFile) LocalPath() string { sf.mu.RLock() defer sf.mu.RUnlock() - return sf.metadata.localPath + return sf.staticMetadata.localPath } // MasterKey returns the masterkey used to encrypt the file. func (sf *SiaFile) MasterKey() crypto.TwofishKey { - sf.mu.RLock() - defer sf.mu.RUnlock() - return sf.metadata.masterKey + return sf.staticMetadata.staticMasterKey } // Mode returns the FileMode of the SiaFile. func (sf *SiaFile) Mode() os.FileMode { sf.mu.RLock() defer sf.mu.RUnlock() - return sf.metadata.mode + return sf.staticMetadata.mode } // PieceSize returns the size of a single piece of the file. func (sf *SiaFile) PieceSize() uint64 { - sf.mu.RLock() - defer sf.mu.RUnlock() - return sf.metadata.pieceSize + return sf.staticMetadata.staticPieceSize } // Rename changes the name of the file to a new one. 
@@ -138,7 +132,7 @@ func (sf *SiaFile) PieceSize() uint64 { func (sf *SiaFile) Rename(newName string) error { sf.mu.Lock() defer sf.mu.Unlock() - sf.metadata.siaPath = newName + sf.staticMetadata.siaPath = newName return nil } @@ -146,7 +140,7 @@ func (sf *SiaFile) Rename(newName string) error { func (sf *SiaFile) SetMode(mode os.FileMode) { sf.mu.Lock() defer sf.mu.Unlock() - sf.metadata.mode = mode + sf.staticMetadata.mode = mode } // SetLocalPath changes the local path of the file which is used to repair @@ -154,21 +148,19 @@ func (sf *SiaFile) SetMode(mode os.FileMode) { func (sf *SiaFile) SetLocalPath(path string) { sf.mu.Lock() defer sf.mu.Unlock() - sf.metadata.localPath = path + sf.staticMetadata.localPath = path } // SiaPath returns the file's sia path. func (sf *SiaFile) SiaPath() string { sf.mu.RLock() defer sf.mu.RUnlock() - return sf.metadata.siaPath + return sf.staticMetadata.siaPath } // Size returns the file's size. func (sf *SiaFile) Size() uint64 { - sf.mu.RLock() - defer sf.mu.RUnlock() - return uint64(sf.metadata.fileSize) + return uint64(sf.staticMetadata.staticFileSize) } // UploadedBytes indicates how many bytes of the file have been uploaded via @@ -178,7 +170,7 @@ func (sf *SiaFile) UploadedBytes() uint64 { sf.mu.RLock() defer sf.mu.RUnlock() var uploaded uint64 - for _, chunk := range sf.chunks { + for _, chunk := range sf.staticChunks { for _, pieceSet := range chunk.pieces { // Note: we need to multiply by SectorSize here instead of // f.pieceSize because the actual bytes uploaded include overhead @@ -202,6 +194,6 @@ func (sf *SiaFile) UploadProgress() float64 { } // ChunkSize returns the size of a single chunk of the file. 
-func (sf *SiaFile) chunkSize(chunkIndex uint64) uint64 { - return sf.metadata.pieceSize * uint64(sf.chunks[chunkIndex].erasureCode.MinPieces()) +func (sf *SiaFile) staticChunkSize(chunkIndex uint64) uint64 { + return sf.staticMetadata.staticPieceSize * uint64(sf.staticChunks[chunkIndex].staticErasureCode.MinPieces()) } diff --git a/modules/renter/siafile/siafile.go b/modules/renter/siafile/siafile.go index f79185b239..ad501d8b05 100644 --- a/modules/renter/siafile/siafile.go +++ b/modules/renter/siafile/siafile.go @@ -21,42 +21,42 @@ type ( // allows for easy constant-time updates of the file without having to read or // write the whole file. SiaFile struct { - // metadata is the mostly static metadata of a SiaFile. The reserved - // size of the metadata on disk should always be a multiple of 4kib. - // The metadata is also the only part of the file that is JSON encoded + // staticMetadata is the mostly static staticMetadata of a SiaFile. The reserved + // size of the staticMetadata on disk should always be a multiple of 4kib. + // The staticMetadata is also the only part of the file that is JSON encoded // and can therefore be easily extended. - metadata Metadata + staticMetadata Metadata // pubKeyTable stores the public keys of the hosts this file's pieces are uploaded to. // Since multiple pieces from different chunks might be uploaded to the same host, this // allows us to deduplicate the rather large public keys. pubKeyTable []types.SiaPublicKey - // chunks are the chunks the file was split into. - chunks []Chunk + // staticChunks are the staticChunks the file was split into. + staticChunks []Chunk // utility fields. These are not persisted. - deleted bool - mu sync.RWMutex - uid string + deleted bool + mu sync.RWMutex + staticUID string } // Chunk represents a single chunk of a file on disk Chunk struct { // erasure code settings. 
// - // erasureCodeType specifies the algorithm used for erasure coding + // staticErasureCodeType specifies the algorithm used for erasure coding // chunks. Available types are: // 0 - Invalid / Missing Code // 1 - Reed Solomon Code // // erasureCodeParams specifies possible parameters for a certain - // erasureCodeType. Currently params will be parsed as follows: + // staticErasureCodeType. Currently params will be parsed as follows: // Reed Solomon Code - 4 bytes dataPieces / 4 bytes parityPieces // - erasureCodeType [4]byte - erasureCodeParams [8]byte - erasureCode modules.ErasureCoder + staticErasureCodeType [4]byte + staticErasureCodeParams [8]byte + staticErasureCode modules.ErasureCoder // extensionInfo is some reserved space for each chunk that allows us // to indicate if a chunk is special. @@ -78,23 +78,23 @@ type ( // TODO needs changes once we move persistence over. func New(siaPath string, erasureCode []modules.ErasureCoder, pieceSize, fileSize uint64, fileMode os.FileMode, source string) *SiaFile { file := &SiaFile{ - metadata: Metadata{ - fileSize: int64(fileSize), - localPath: source, - masterKey: crypto.GenerateTwofishKey(), - mode: fileMode, - pieceSize: pieceSize, - siaPath: siaPath, + staticMetadata: Metadata{ + staticFileSize: int64(fileSize), + localPath: source, + staticMasterKey: crypto.GenerateTwofishKey(), + mode: fileMode, + staticPieceSize: pieceSize, + siaPath: siaPath, }, - uid: string(fastrand.Bytes(20)), + staticUID: string(fastrand.Bytes(20)), } - file.chunks = make([]Chunk, len(erasureCode)) - for i := range file.chunks { - file.chunks[i].erasureCode = erasureCode[i] - file.chunks[i].erasureCodeType = [4]byte{0, 0, 0, 1} - binary.LittleEndian.PutUint32(file.chunks[i].erasureCodeParams[0:4], uint32(erasureCode[i].MinPieces())) - binary.LittleEndian.PutUint32(file.chunks[i].erasureCodeParams[4:8], uint32(erasureCode[i].NumPieces()-erasureCode[i].MinPieces())) - file.chunks[i].pieces = make([][]Piece, erasureCode[i].NumPieces()) + 
file.staticChunks = make([]Chunk, len(erasureCode)) + for i := range file.staticChunks { + file.staticChunks[i].staticErasureCode = erasureCode[i] + file.staticChunks[i].staticErasureCodeType = [4]byte{0, 0, 0, 1} + binary.LittleEndian.PutUint32(file.staticChunks[i].staticErasureCodeParams[0:4], uint32(erasureCode[i].MinPieces())) + binary.LittleEndian.PutUint32(file.staticChunks[i].staticErasureCodeParams[4:8], uint32(erasureCode[i].NumPieces()-erasureCode[i].MinPieces())) + file.staticChunks[i].pieces = make([][]Piece, erasureCode[i].NumPieces()) } return file } @@ -120,15 +120,15 @@ func (sf *SiaFile) AddPiece(pk types.SiaPublicKey, chunkIndex, pieceIndex uint64 tableIndex = len(sf.pubKeyTable) - 1 } // Check if the chunkIndex is valid. - if chunkIndex >= uint64(len(sf.chunks)) { - return fmt.Errorf("chunkIndex %v out of bounds (%v)", chunkIndex, len(sf.chunks)) + if chunkIndex >= uint64(len(sf.staticChunks)) { + return fmt.Errorf("chunkIndex %v out of bounds (%v)", chunkIndex, len(sf.staticChunks)) } // Check if the pieceIndex is valid. - if pieceIndex >= uint64(len(sf.chunks[chunkIndex].pieces)) { - return fmt.Errorf("pieceIndex %v out of bounds (%v)", pieceIndex, len(sf.chunks[chunkIndex].pieces)) + if pieceIndex >= uint64(len(sf.staticChunks[chunkIndex].pieces)) { + return fmt.Errorf("pieceIndex %v out of bounds (%v)", pieceIndex, len(sf.staticChunks[chunkIndex].pieces)) } // Add the piece to the chunk. - sf.chunks[chunkIndex].pieces[pieceIndex] = append(sf.chunks[chunkIndex].pieces[pieceIndex], Piece{ + sf.staticChunks[chunkIndex].pieces[pieceIndex] = append(sf.staticChunks[chunkIndex].pieces[pieceIndex], Piece{ HostPubKey: pk, MerkleRoot: merkleRoot, }) @@ -141,7 +141,7 @@ func (sf *SiaFile) Available(offline map[string]bool) bool { defer sf.mu.RUnlock() // We need to find at least erasureCode.MinPieces different pieces for each // chunk for the file to be available. 
- for chunkIndex, chunk := range sf.chunks { + for chunkIndex, chunk := range sf.staticChunks { piecesForChunk := 0 for _, pieceSet := range chunk.pieces { for _, piece := range pieceSet { @@ -150,11 +150,11 @@ func (sf *SiaFile) Available(offline map[string]bool) bool { break // break out since we only count unique pieces } } - if piecesForChunk >= sf.chunks[chunkIndex].erasureCode.MinPieces() { + if piecesForChunk >= sf.staticChunks[chunkIndex].staticErasureCode.MinPieces() { break // we already have enough pieces for this chunk. } } - if piecesForChunk < sf.chunks[chunkIndex].erasureCode.MinPieces() { + if piecesForChunk < sf.staticChunks[chunkIndex].staticErasureCode.MinPieces() { return false // this chunk isn't available. } } @@ -165,20 +165,18 @@ func (sf *SiaFile) Available(offline map[string]bool) bool { // offset of a file and also the relative offset within the chunk. If the // offset is out of bounds, chunkIndex will be equal to NumChunk(). func (sf *SiaFile) ChunkIndexByOffset(offset uint64) (chunkIndex uint64, off uint64) { - for chunkIndex := uint64(0); chunkIndex < uint64(len(sf.chunks)); chunkIndex++ { - if sf.chunkSize(chunkIndex) > offset { + for chunkIndex := uint64(0); chunkIndex < uint64(len(sf.staticChunks)); chunkIndex++ { + if sf.staticChunkSize(chunkIndex) > offset { return chunkIndex, offset } - offset -= sf.chunkSize(chunkIndex) + offset -= sf.staticChunkSize(chunkIndex) } return } // ErasureCode returns the erasure coder used by the file. func (sf *SiaFile) ErasureCode(chunkIndex uint64) modules.ErasureCoder { - sf.mu.RLock() - defer sf.mu.RUnlock() - return sf.chunks[chunkIndex].erasureCode + return sf.staticChunks[chunkIndex].staticErasureCode } // NumChunks returns the number of chunks the file consists of. 
This will @@ -187,7 +185,7 @@ func (sf *SiaFile) ErasureCode(chunkIndex uint64) modules.ErasureCoder { func (sf *SiaFile) NumChunks() uint64 { sf.mu.RLock() defer sf.mu.RUnlock() - return uint64(len(sf.chunks)) + return uint64(len(sf.staticChunks)) } // Pieces returns all the pieces for a chunk in a slice of slices that contains @@ -195,14 +193,14 @@ func (sf *SiaFile) NumChunks() uint64 { func (sf *SiaFile) Pieces(chunkIndex uint64) ([][]Piece, error) { sf.mu.RLock() defer sf.mu.RUnlock() - if chunkIndex >= uint64(len(sf.chunks)) { - panic(fmt.Sprintf("index %v out of bounds (%v)", chunkIndex, len(sf.chunks))) + if chunkIndex >= uint64(len(sf.staticChunks)) { + panic(fmt.Sprintf("index %v out of bounds (%v)", chunkIndex, len(sf.staticChunks))) } // Return a deep-copy to avoid race conditions. - pieces := make([][]Piece, len(sf.chunks[chunkIndex].pieces)) + pieces := make([][]Piece, len(sf.staticChunks[chunkIndex].pieces)) for pieceIndex := range pieces { - pieces[pieceIndex] = make([]Piece, len(sf.chunks[chunkIndex].pieces[pieceIndex])) - copy(pieces[pieceIndex], sf.chunks[chunkIndex].pieces[pieceIndex]) + pieces[pieceIndex] = make([]Piece, len(sf.staticChunks[chunkIndex].pieces[pieceIndex])) + copy(pieces[pieceIndex], sf.staticChunks[chunkIndex].pieces[pieceIndex]) } return pieces, nil } @@ -214,13 +212,13 @@ func (sf *SiaFile) Pieces(chunkIndex uint64) ([][]Piece, error) { func (sf *SiaFile) Redundancy(offlineMap map[string]bool, goodForRenewMap map[string]bool) float64 { sf.mu.RLock() defer sf.mu.RUnlock() - if sf.metadata.fileSize == 0 { + if sf.staticMetadata.staticFileSize == 0 { return -1 } minPiecesRenew := ^uint64(0) minPiecesNoRenew := ^uint64(0) - for _, chunk := range sf.chunks { + for _, chunk := range sf.staticChunks { // Loop over chunks and remember how many unique pieces of the chunk // were goodForRenew and how many were not. 
numPiecesRenew := uint64(0) @@ -271,8 +269,8 @@ func (sf *SiaFile) Redundancy(offlineMap map[string]bool, goodForRenewMap map[st // a better user experience. If the renter operates correctly, redundancy // should never go above numPieces / minPieces and redundancyNoRenew should // never go below 1. - redundancy := float64(minPiecesRenew) / float64(sf.chunks[0].erasureCode.MinPieces()) // TODO this shouldn't be chunks[0] - redundancyNoRenew := float64(minPiecesNoRenew) / float64(sf.chunks[0].erasureCode.MinPieces()) //TODO this shouldn't be chunks[0] + redundancy := float64(minPiecesRenew) / float64(sf.staticChunks[0].staticErasureCode.MinPieces()) // TODO this shouldn't be chunks[0] + redundancyNoRenew := float64(minPiecesNoRenew) / float64(sf.staticChunks[0].staticErasureCode.MinPieces()) //TODO this shouldn't be chunks[0] if redundancy < 1 { return redundancyNoRenew } @@ -281,7 +279,5 @@ func (sf *SiaFile) Redundancy(offlineMap map[string]bool, goodForRenewMap map[st // UID returns a unique identifier for this file. 
func (sf *SiaFile) UID() string { - sf.mu.RLock() - defer sf.mu.RUnlock() - return sf.uid + return sf.staticUID } From 6181e0abbafa2fa440f5c7286c27fee5f9d3432e Mon Sep 17 00:00:00 2001 From: Christopher Schinnerl Date: Tue, 3 Jul 2018 19:08:03 -0400 Subject: [PATCH 14/15] fix redundancy for multiple erasure codes --- modules/renter/siafile/siafile.go | 25 ++++++++++++------------- 1 file changed, 12 insertions(+), 13 deletions(-) diff --git a/modules/renter/siafile/siafile.go b/modules/renter/siafile/siafile.go index ad501d8b05..41b5184719 100644 --- a/modules/renter/siafile/siafile.go +++ b/modules/renter/siafile/siafile.go @@ -4,6 +4,7 @@ import ( "bytes" "encoding/binary" "fmt" + "math" "os" "sync" @@ -216,8 +217,8 @@ func (sf *SiaFile) Redundancy(offlineMap map[string]bool, goodForRenewMap map[st return -1 } - minPiecesRenew := ^uint64(0) - minPiecesNoRenew := ^uint64(0) + minRedundancy := math.MaxFloat64 + minRedundancyNoRenew := math.MaxFloat64 for _, chunk := range sf.staticChunks { // Loop over chunks and remember how many unique pieces of the chunk // were goodForRenew and how many were not. @@ -254,13 +255,13 @@ func (sf *SiaFile) Redundancy(offlineMap map[string]bool, goodForRenewMap map[st numPiecesNoRenew++ } } - // Remember the smallest number of goodForRenew pieces encountered. - if numPiecesRenew < minPiecesRenew { - minPiecesRenew = numPiecesRenew + redundancy := float64(numPiecesRenew) / float64(chunk.staticErasureCode.MinPieces()) + if redundancy < minRedundancy { + minRedundancy = redundancy } - // Remember the smallest number of !goodForRenew pieces encountered. 
- if numPiecesNoRenew < minPiecesNoRenew { - minPiecesNoRenew = numPiecesNoRenew + redundancyNoRenew := float64(numPiecesNoRenew) / float64(chunk.staticErasureCode.MinPieces()) + if redundancyNoRenew < minRedundancyNoRenew { + minRedundancyNoRenew = redundancyNoRenew } } @@ -269,12 +270,10 @@ func (sf *SiaFile) Redundancy(offlineMap map[string]bool, goodForRenewMap map[st // a better user experience. If the renter operates correctly, redundancy // should never go above numPieces / minPieces and redundancyNoRenew should // never go below 1. - redundancy := float64(minPiecesRenew) / float64(sf.staticChunks[0].staticErasureCode.MinPieces()) // TODO this shouldn't be chunks[0] - redundancyNoRenew := float64(minPiecesNoRenew) / float64(sf.staticChunks[0].staticErasureCode.MinPieces()) //TODO this shouldn't be chunks[0] - if redundancy < 1 { - return redundancyNoRenew + if minRedundancy < 1 { + return minRedundancyNoRenew } - return redundancy + return minRedundancy } // UID returns a unique identifier for this file. From b240acbb0681e5e924302dfcc839f2c844611059 Mon Sep 17 00:00:00 2001 From: Christopher Schinnerl Date: Wed, 11 Jul 2018 10:07:25 -0400 Subject: [PATCH 15/15] review comments --- modules/renter/siafile/metadata.go | 15 +++++++++------ modules/renter/siafile/siafile.go | 5 +++-- modules/renter/upload.go | 2 +- modules/renter/uploadheap.go | 17 ++++++++++------- 4 files changed, 23 insertions(+), 16 deletions(-) diff --git a/modules/renter/siafile/metadata.go b/modules/renter/siafile/metadata.go index 4d2c6137f5..6ccab5c0ab 100644 --- a/modules/renter/siafile/metadata.go +++ b/modules/renter/siafile/metadata.go @@ -13,12 +13,15 @@ import ( type ( // Metadata is the metadata of a SiaFile and is JSON encoded. 
Metadata struct { - staticVersion [16]byte // version of the sia file format used - staticFileSize int64 // total size of the file - staticMasterKey crypto.TwofishKey // masterkey used to encrypt pieces - staticPieceSize uint64 // size of a single piece of the file - localPath string // file to the local copy of the file used for repairing - siaPath string // the path of the file on the Sia network + staticVersion [16]byte // version of the sia file format used + staticFileSize int64 // total size of the file + staticPieceSize uint64 // size of a single piece of the file + localPath string // file to the local copy of the file used for repairing + siaPath string // the path of the file on the Sia network + + // fields for encryption + staticMasterKey crypto.TwofishKey // masterkey used to encrypt pieces + staticSharingKey crypto.TwofishKey // key used to encrypt shared pieces // The following fields are the usual unix timestamps of files. modTime time.Time // time of last content modification diff --git a/modules/renter/siafile/siafile.go b/modules/renter/siafile/siafile.go index 41b5184719..03ae0d4ada 100644 --- a/modules/renter/siafile/siafile.go +++ b/modules/renter/siafile/siafile.go @@ -69,7 +69,6 @@ type ( // Piece represents a single piece of a chunk on disk Piece struct { - KeyNonce [4]byte // nonce used for encrypting the piece HostPubKey types.SiaPublicKey // public key of the host MerkleRoot crypto.Hash // merkle root of the piece } @@ -270,7 +269,9 @@ func (sf *SiaFile) Redundancy(offlineMap map[string]bool, goodForRenewMap map[st // a better user experience. If the renter operates correctly, redundancy // should never go above numPieces / minPieces and redundancyNoRenew should // never go below 1. 
- if minRedundancy < 1 { + if minRedundancy < 1 && minRedundancyNoRenew >= 1 { + return 1 + } else if minRedundancy < 1 { return minRedundancyNoRenew } return minRedundancy diff --git a/modules/renter/upload.go b/modules/renter/upload.go index 6bf3d51985..6690a635ea 100644 --- a/modules/renter/upload.go +++ b/modules/renter/upload.go @@ -28,7 +28,7 @@ var ( errUploadDirectory = errors.New("cannot upload directory") ) -// newFile is a helper to more easily create a new Siafile for testing. +// newFile is a helper to more easily create a new Siafile. func newFile(name string, rsc modules.ErasureCoder, pieceSize, fileSize uint64, mode os.FileMode, source string) *siafile.SiaFile { numChunks := 1 chunkSize := pieceSize * uint64(rsc.MinPieces()) diff --git a/modules/renter/uploadheap.go b/modules/renter/uploadheap.go index 1d8915bf2f..c1b0d0d06f 100644 --- a/modules/renter/uploadheap.go +++ b/modules/renter/uploadheap.go @@ -173,9 +173,9 @@ func (r *Renter) buildUnfinishedChunks(f *siafile.SiaFile, hosts map[string]stru pks[string(pk.Key)] = pk } - // Iterate through the pieces of the file and mark which hosts are already - // in use for the chunk. As you delete hosts from the 'unusedHosts' map, - // also increment the 'piecesCompleted' value. + // Iterate through the pieces of all chunks of the file and mark which + // hosts are already in use for a particular chunk. As you delete hosts + // from the 'unusedHosts' map, also increment the 'piecesCompleted' value. for chunkIndex := uint64(0); chunkIndex < f.NumChunks(); chunkIndex++ { pieces, err := f.Pieces(chunkIndex) if err != nil { @@ -208,10 +208,13 @@ func (r *Renter) buildUnfinishedChunks(f *siafile.SiaFile, hosts map[string]stru newUnfinishedChunks[chunkIndex].piecesCompleted++ delete(newUnfinishedChunks[chunkIndex].unusedHosts, pk.String()) } else if exists { - // This host has a piece, but it is the same piece another host - // has. 
We should still remove the host from the unusedHosts - // since one host having multiple pieces of a chunk might lead - // to unexpected issues. + // This host has a piece, but it is the same piece another + // host has. We should still remove the host from the + // unusedHosts since one host having multiple pieces of a + // chunk might lead to unexpected issues. e.g. if a host + // has multiple pieces and another host with redundant + // pieces goes offline, we end up with false redundancy + // reporting. delete(newUnfinishedChunks[chunkIndex].unusedHosts, pk.String()) } }