
Commit

move erasure code to chunks, add localPath and move available out of metadata.go
ChrisSchinnerl committed Jun 27, 2018
1 parent 8955d4e commit 198c092
Showing 12 changed files with 264 additions and 197 deletions.
30 changes: 20 additions & 10 deletions modules/renter/download.go
@@ -377,8 +377,18 @@ func (r *Renter) managedNewDownload(params downloadParams) (*download, error) {
}

// Determine which chunks to download.
-minChunk := params.offset / params.file.ChunkSize()
-maxChunk := (params.offset + params.length - 1) / params.file.ChunkSize()
+minChunk, minChunkOffset := params.file.ChunkIndexByOffset(params.offset)
+maxChunk, maxChunkOffset := params.file.ChunkIndexByOffset(params.offset + params.length)
+if minChunk == params.file.NumChunks() || maxChunk == params.file.NumChunks() {
+return nil, errors.New("download is requesting a chunk that is past the boundary of the file")
+}
+// If maxChunkOffset is exactly 0 we need to subtract 1 from maxChunk. E.g. if
+// the chunkSize is 100 bytes and we want to download 100 bytes from offset
+// 0, maxChunk would be 1 and maxChunkOffset would be 0. We want maxChunk
+// to be 0 though, since we don't actually need any data from chunk 1.
+if maxChunk > 0 && maxChunkOffset == 0 {
+maxChunk--
+}

// For each chunk, assemble a mapping from the contract id to the index of
// the piece within the chunk that the contract is responsible for.
@@ -413,13 +423,13 @@ func (r *Renter) managedNewDownload(params downloadParams) (*download, error) {
for i := minChunk; i <= maxChunk; i++ {
udc := &unfinishedDownloadChunk{
destination: params.destination,
-erasureCode: params.file.ErasureCode(),
+erasureCode: params.file.ErasureCode(i),
masterKey: params.file.MasterKey(),

staticChunkIndex: i,
staticCacheID: fmt.Sprintf("%v:%v", d.staticSiaPath, i),
staticChunkMap: chunkMaps[i-minChunk],
-staticChunkSize: params.file.ChunkSize(),
+staticChunkSize: params.file.ChunkSize(i),
staticPieceSize: params.file.PieceSize(),

// TODO: 25ms is just a guess for a good default. Really, we want to
@@ -435,8 +445,8 @@ func (r *Renter) managedNewDownload(params downloadParams) (*download, error) {
staticNeedsMemory: params.needsMemory,
staticPriority: params.priority,

-physicalChunkData: make([][]byte, params.file.ErasureCode().NumPieces()),
-pieceUsage: make([]bool, params.file.ErasureCode().NumPieces()),
+physicalChunkData: make([][]byte, params.file.ErasureCode(i).NumPieces()),
+pieceUsage: make([]bool, params.file.ErasureCode(i).NumPieces()),

download: d,
staticStreamCache: r.staticStreamCache,
@@ -445,16 +455,16 @@ func (r *Renter) managedNewDownload(params downloadParams) (*download, error) {
// Set the fetchOffset - the offset within the chunk that we start
// downloading from.
if i == minChunk {
-udc.staticFetchOffset = params.offset % params.file.ChunkSize()
+udc.staticFetchOffset = minChunkOffset
} else {
udc.staticFetchOffset = 0
}
// Set the fetchLength - the number of bytes to fetch within the chunk
// that we start downloading from.
-if i == maxChunk && (params.length+params.offset)%params.file.ChunkSize() != 0 {
-udc.staticFetchLength = ((params.length + params.offset) % params.file.ChunkSize()) - udc.staticFetchOffset
+if i == maxChunk && maxChunkOffset != 0 {
+udc.staticFetchLength = maxChunkOffset - udc.staticFetchOffset
} else {
-udc.staticFetchLength = params.file.ChunkSize() - udc.staticFetchOffset
+udc.staticFetchLength = params.file.ChunkSize(i) - udc.staticFetchOffset
}
// Set the writeOffset within the destination for where the data should
// be written.
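The switch to `ChunkIndexByOffset` is what the rest of this commit hinges on: once each chunk carries its own erasure code, chunk sizes can differ, so a chunk index can no longer be computed by dividing the offset by a fixed `ChunkSize()`. The method itself is not shown in this commit; a minimal sketch consistent with the calling code above (the loop body is an assumption) might look like:

```go
// ChunkIndexByOffset returns the index of the chunk containing the given
// offset, plus the offset within that chunk. Chunk sizes may differ, so it
// walks the chunks rather than dividing by a constant. An offset at or past
// the end of the file returns (NumChunks(), 0), which is why the caller
// compares the result against NumChunks() to reject out-of-bounds requests.
func (sf *SiaFile) ChunkIndexByOffset(offset uint64) (chunkIndex, chunkOffset uint64) {
	for chunkIndex = 0; chunkIndex < sf.NumChunks(); chunkIndex++ {
		if offset < sf.ChunkSize(chunkIndex) {
			return chunkIndex, offset
		}
		offset -= sf.ChunkSize(chunkIndex)
	}
	return sf.NumChunks(), 0
}
```

Under these semantics the boundary comment in the hunk works out: with 100-byte chunks, a download of 100 bytes at offset 0 gives (minChunk, minChunkOffset) = (0, 0) and (maxChunk, maxChunkOffset) = (1, 0), and the `maxChunkOffset == 0` guard pulls maxChunk back to 0 so only the one needed chunk is fetched.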
8 changes: 6 additions & 2 deletions modules/renter/downloadstreamer.go
@@ -64,10 +64,14 @@ func (s *streamer) Read(p []byte) (n int, err error) {
}

// Calculate how much we can download. We never download more than a single chunk.
-chunkSize := s.file.ChunkSize()
+chunkIndex, chunkOffset := s.file.ChunkIndexByOffset(uint64(s.offset))
+if chunkIndex == s.file.NumChunks() {
+return 0, io.EOF
+}
+chunkSize := s.file.ChunkSize(chunkIndex)
remainingData := uint64(fileSize - s.offset)
requestedData := uint64(len(p))
-remainingChunk := chunkSize - uint64(s.offset)%chunkSize
+remainingChunk := chunkSize - chunkOffset
length := min(remainingData, requestedData, remainingChunk)

// Download data
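The new EOF check aside, the read-length logic is unchanged: a single `Read` never crosses a chunk boundary. For example, with two 100-byte chunks, a 180-byte file, `s.offset = 130`, and `len(p) = 64`, we get chunkIndex 1, chunkOffset 30, remainingChunk 70, remainingData 50, and requestedData 64, so the call reads the final 50 bytes of the file. The variadic `min` is assumed to be a small package-local helper along these lines:

```go
// min returns the smallest of its uint64 arguments; streamer.Read uses it to
// cap a read at the file end, the caller's buffer, and the chunk boundary.
func min(values ...uint64) uint64 {
	m := values[0]
	for _, v := range values[1:] {
		if v < m {
			m = v
		}
	}
	return m
}
```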
3 changes: 2 additions & 1 deletion modules/renter/files.go
@@ -261,12 +261,13 @@ func (r *Renter) RenameFile(currentName, newName string) error {

// fileToSiaFile converts a legacy file to a SiaFile. Fields that can't be
// populated using the legacy file remain blank.
-func (r *Renter) fileToSiaFile(f *file) *siafile.SiaFile {
+func (r *Renter) fileToSiaFile(f *file, repairPath string) *siafile.SiaFile {
fileData := siafile.FileData{
Name: f.name,
FileSize: f.size,
MasterKey: f.masterKey,
ErasureCode: f.erasureCode,
+RepairPath: repairPath,
PieceSize: f.pieceSize,
Mode: os.FileMode(f.mode),
Deleted: f.deleted,
31 changes: 17 additions & 14 deletions modules/renter/files_test.go
@@ -7,7 +7,6 @@ import (

"github.com/NebulousLabs/Sia/crypto"
"github.com/NebulousLabs/Sia/modules"
-"github.com/NebulousLabs/Sia/modules/renter/siafile"
"github.com/NebulousLabs/Sia/types"
"github.com/NebulousLabs/errors"
)
@@ -32,7 +31,7 @@ func TestFileNumChunks(t *testing.T) {

for _, test := range tests {
rsc, _ := NewRSCode(test.piecesPerChunk, 1) // can't use 0
-f := siafile.New(t.Name(), rsc, test.pieceSize, test.size)
+f := newFile(t.Name(), rsc, test.pieceSize, test.size, 0777, "")
if f.NumChunks() != test.expNumChunks {
t.Errorf("Test %v: expected %v, got %v", test, test.expNumChunks, f.NumChunks())
}
@@ -42,7 +41,7 @@
// TestFileAvailable probes the available method of the file type.
func TestFileAvailable(t *testing.T) {
rsc, _ := NewRSCode(1, 1) // can't use 0
-f := siafile.New(t.Name(), rsc, pieceSize, 100)
+f := newFile(t.Name(), rsc, pieceSize, 100, 0777, "")
neverOffline := make(map[string]bool)

if f.Available(neverOffline) {
@@ -69,7 +68,7 @@
func TestFileUploadedBytes(t *testing.T) {
// ensure that a piece fits within a sector
rsc, _ := NewRSCode(1, 3)
-f := siafile.New(t.Name(), rsc, modules.SectorSize/2, 1000)
+f := newFile(t.Name(), rsc, modules.SectorSize/2, 1000, 0777, "")
for i := uint64(0); i < 4; i++ {
err := f.AddPiece(types.SiaPublicKey{}, uint64(0), i, crypto.Hash{})
if err != nil {
@@ -85,7 +84,7 @@
// 100%, even if more pieces have been uploaded,
func TestFileUploadProgressPinning(t *testing.T) {
rsc, _ := NewRSCode(1, 1)
-f := siafile.New(t.Name(), rsc, 2, 4)
+f := newFile(t.Name(), rsc, 2, 4, 0777, "")
for i := uint64(0); i < 2; i++ {
err1 := f.AddPiece(types.SiaPublicKey{Key: []byte{byte(0)}}, uint64(0), i, crypto.Hash{})
err2 := f.AddPiece(types.SiaPublicKey{Key: []byte{byte(1)}}, uint64(0), i, crypto.Hash{})
@@ -111,7 +110,7 @@ func TestFileRedundancy(t *testing.T) {

for _, nData := range nDatas {
rsc, _ := NewRSCode(nData, 10)
-f := siafile.New(t.Name(), rsc, 100, 1000)
+f := newFile(t.Name(), rsc, 100, 1000, 0777, "")
// Test that an empty file has 0 redundancy.
if r := f.Redundancy(neverOffline, goodForRenew); r != 0 {
t.Error("expected 0 redundancy, got", r)
@@ -145,33 +144,33 @@
t.Fatal(err)
}
// 1.0 / MinPieces because the chunk with the least number of pieces has 1 piece.
-expectedR := 1.0 / float64(f.ErasureCode().MinPieces())
+expectedR := 1.0 / float64(f.ErasureCode(0).MinPieces())
if r := f.Redundancy(neverOffline, goodForRenew); r != expectedR {
t.Errorf("expected %f redundancy, got %f", expectedR, r)
}
// Test that adding a file contract that has erasureCode.MinPieces() pieces
// per chunk for all chunks results in a file with redundancy > 1.
for iChunk := uint64(0); iChunk < f.NumChunks(); iChunk++ {
-for iPiece := uint64(1); iPiece < uint64(f.ErasureCode().MinPieces()); iPiece++ {
+for iPiece := uint64(1); iPiece < uint64(f.ErasureCode(0).MinPieces()); iPiece++ {
err := f.AddPiece(types.SiaPublicKey{Key: []byte{byte(3)}}, iChunk, iPiece, crypto.Hash{})
if err != nil {
t.Fatal(err)
}
}
-err := f.AddPiece(types.SiaPublicKey{Key: []byte{byte(4)}}, iChunk, uint64(f.ErasureCode().MinPieces()), crypto.Hash{})
+err := f.AddPiece(types.SiaPublicKey{Key: []byte{byte(4)}}, iChunk, uint64(f.ErasureCode(0).MinPieces()), crypto.Hash{})
if err != nil {
t.Fatal(err)
}
}
// 1+MinPieces / MinPieces because the chunk with the least number of pieces has 1+MinPieces pieces.
-expectedR = float64(1+f.ErasureCode().MinPieces()) / float64(f.ErasureCode().MinPieces())
+expectedR = float64(1+f.ErasureCode(0).MinPieces()) / float64(f.ErasureCode(0).MinPieces())
if r := f.Redundancy(neverOffline, goodForRenew); r != expectedR {
t.Errorf("expected %f redundancy, got %f", expectedR, r)
}

// verify offline file contracts are not counted in the redundancy
for iChunk := uint64(0); iChunk < f.NumChunks(); iChunk++ {
-for iPiece := uint64(0); iPiece < uint64(f.ErasureCode().MinPieces()); iPiece++ {
+for iPiece := uint64(0); iPiece < uint64(f.ErasureCode(0).MinPieces()); iPiece++ {
err := f.AddPiece(types.SiaPublicKey{Key: []byte{byte(5)}}, iChunk, iPiece, crypto.Hash{})
if err != nil {
t.Fatal(err)
@@ -191,7 +190,8 @@

// TestFileExpiration probes the expiration method of the file type.
func TestFileExpiration(t *testing.T) {
-f := newTestingFile()
+rsc, _ := NewRSCode(1, 2)
+f := newFile(t.Name(), rsc, pieceSize, 1000, 0777, "")
contracts := make(map[string]modules.RenterContract)
if f.Expiration(contracts) != 0 {
t.Error("file with no pieces should report as having no time remaining")
@@ -245,9 +245,10 @@ func TestRenterFileListLocalPath(t *testing.T) {
defer rt.Close()
id := rt.renter.mu.Lock()
f := newTestingFile()
+f.SetLocalPath("TestPath")
rt.renter.files[f.SiaPath()] = f
rt.renter.persist.Tracking[f.SiaPath()] = trackedFile{
-RepairPath: "TestPath",
+RepairPath: f.LocalPath(),
}
rt.renter.mu.Unlock(id)
files := rt.renter.FileList()
@@ -414,7 +415,9 @@ func TestRenterRenameFile(t *testing.T) {
}

// Renaming should also update the tracking set
-rt.renter.persist.Tracking["1"] = trackedFile{"foo"}
+rt.renter.persist.Tracking["1"] = trackedFile{
+RepairPath: f2.LocalPath(),
+}
err = rt.renter.RenameFile("1", "1b")
if err != nil {
t.Fatal(err)
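All of these tests swap `siafile.New` for a `newFile` helper that additionally takes a file mode and a local path. The helper's body is not part of the shown hunks; inferred purely from its call sites, it plausibly has a shape like the following (the body and the `SetMode` setter are assumptions; only `SetLocalPath` appears elsewhere in this commit):

```go
// newFile is a hypothetical reconstruction of the test helper, inferred from
// call sites such as newFile(t.Name(), rsc, pieceSize, 100, 0777, ""): it
// builds a SiaFile and records the mode and the local repair path.
func newFile(name string, ec modules.ErasureCoder, pieceSize, fileSize uint64,
	mode os.FileMode, localPath string) *siafile.SiaFile {
	f := siafile.New(name, ec, pieceSize, fileSize) // pre-change constructor, for illustration
	f.SetMode(mode)           // assumed setter
	f.SetLocalPath(localPath) // SetLocalPath is exercised in TestRenterFileListLocalPath above
	return f
}
```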
4 changes: 2 additions & 2 deletions modules/renter/persist.go
@@ -421,12 +421,12 @@ func (r *Renter) loadSharedFiles(reader io.Reader) ([]string, error) {
// Add files to renter.
names := make([]string, numFiles)
for i, f := range files {
-r.files[f.name] = r.fileToSiaFile(f)
+r.files[f.name] = r.fileToSiaFile(f, r.persist.Tracking[f.name].RepairPath)
names[i] = f.name
}
// Save the files.
for _, f := range files {
-r.saveFile(r.fileToSiaFile(f))
+r.saveFile(r.fileToSiaFile(f, r.persist.Tracking[f.name].RepairPath))
}

return names, nil
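Both conversion sites now look up the tracked repair path by file name. Indexing `r.persist.Tracking` with an untracked name is safe: a Go map lookup for a missing key yields the zero value, so untracked files simply convert with an empty `RepairPath`. A self-contained illustration (the struct is a stand-in mirroring the renter's `trackedFile`):

```go
package main

import "fmt"

// trackedFile mirrors the renter's persistence entry, for illustration only.
type trackedFile struct {
	RepairPath string
}

func main() {
	tracking := map[string]trackedFile{
		"movie.mp4": {RepairPath: "/home/user/movie.mp4"},
	}
	// A missing key yields the zero value rather than a panic, so an
	// untracked file's RepairPath comes back as the empty string.
	fmt.Printf("%q\n", tracking["movie.mp4"].RepairPath) // "/home/user/movie.mp4"
	fmt.Printf("%q\n", tracking["other.dat"].RepairPath) // ""
}
```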
2 changes: 1 addition & 1 deletion modules/renter/persist_test.go
@@ -23,7 +23,7 @@ func newTestingFile() *siafile.SiaFile {

name := "testfile-" + strconv.Itoa(int(data[0]))

-return siafile.New(name, rsc, pieceSize, 1000)
+return newFile(name, rsc, pieceSize, 1000, 0777, "")
}

// equalFiles is a helper function that compares two files for equality.
23 changes: 12 additions & 11 deletions modules/renter/siafile/compat.go
@@ -17,6 +17,7 @@ type (
FileSize uint64
MasterKey crypto.TwofishKey
ErasureCode modules.ErasureCoder
+RepairPath string
PieceSize uint64
Mode os.FileMode
Deleted bool
@@ -40,18 +41,17 @@ func NewFromFileData(fd FileData) *SiaFile {
pieceSize: fd.PieceSize,
siaPath: fd.Name,
},
-deleted: fd.Deleted,
-erasureCode: fd.ErasureCode,
-uid: fd.UID,
+deleted: fd.Deleted,
+uid: fd.UID,
}
-chunks := make([]Chunk, file.NumChunks())
-for i := range chunks {
-chunks[i].erasureCodeType = [4]byte{0, 0, 0, 1}
-binary.LittleEndian.PutUint32(chunks[i].erasureCodeParams[0:4], uint32(file.erasureCode.MinPieces()))
-binary.LittleEndian.PutUint32(chunks[i].erasureCodeParams[4:8], uint32(file.erasureCode.NumPieces()-file.erasureCode.MinPieces()))
-chunks[i].pieces = make([][]Piece, file.erasureCode.NumPieces())
+file.chunks = make([]Chunk, len(fd.Chunks))
+for i := range file.chunks {
+file.chunks[i].erasureCode = fd.ErasureCode
+file.chunks[i].erasureCodeType = [4]byte{0, 0, 0, 1}
+binary.LittleEndian.PutUint32(file.chunks[i].erasureCodeParams[0:4], uint32(file.chunks[i].erasureCode.MinPieces()))
+binary.LittleEndian.PutUint32(file.chunks[i].erasureCodeParams[4:8], uint32(file.chunks[i].erasureCode.NumPieces()-file.chunks[i].erasureCode.MinPieces()))
+file.chunks[i].pieces = make([][]Piece, file.chunks[i].erasureCode.NumPieces())
}
-file.chunks = chunks

// Populate the pubKeyTable of the file and add the pieces.
pubKeyMap := make(map[string]int)
@@ -83,7 +83,8 @@ func (sf *SiaFile) ExportFileData() FileData {
Name: sf.metadata.siaPath,
FileSize: uint64(sf.metadata.fileSize),
MasterKey: sf.metadata.masterKey,
-ErasureCode: sf.erasureCode,
+ErasureCode: sf.chunks[0].erasureCode,
+RepairPath: sf.metadata.localPath,
PieceSize: sf.metadata.pieceSize,
Mode: sf.metadata.mode,
Deleted: sf.deleted,
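With the coder stored on each `Chunk`, the file-level accessors used throughout this commit, `ErasureCode(i)` and `ChunkSize(i)`, plausibly reduce to per-chunk lookups. A sketch under that assumption (field names follow `compat.go` above; the `ChunkSize` formula assumes Sia's convention that a chunk holds pieceSize bytes per data piece):

```go
// ErasureCode returns the erasure coder for the chunk at chunkIndex. Before
// this commit there was one file-wide coder; now each chunk carries its own,
// which is why every call site gained a chunk-index argument.
func (sf *SiaFile) ErasureCode(chunkIndex uint64) modules.ErasureCoder {
	return sf.chunks[chunkIndex].erasureCode
}

// ChunkSize returns the size of the chunk at chunkIndex: the piece size
// multiplied by that chunk's number of data (minimum) pieces.
func (sf *SiaFile) ChunkSize(chunkIndex uint64) uint64 {
	return sf.metadata.pieceSize * uint64(sf.chunks[chunkIndex].erasureCode.MinPieces())
}
```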