Commit ca04cd25 authored by David Vorick

Merge branch 'siafile-metadata-loading' into 'master'

Add fileinfo caching to SiaFile

See merge request !3561
parents c91894e8 a87051bd
Pipeline #57542603 failed in 101 minutes and 36 seconds
@@ -70,6 +70,8 @@ func (r *Renter) DirList(siaPath modules.SiaPath) ([]modules.DirectoryInfo, []mo
}
defer r.tg.Done()
// Get utility maps.
offline, goodForRenew, contracts := r.managedContractUtilityMaps()
var dirs []modules.DirectoryInfo
var files []modules.FileInfo
// Get DirectoryInfo
@@ -108,7 +110,7 @@ func (r *Renter) DirList(siaPath modules.SiaPath) ([]modules.DirectoryInfo, []mo
if err != nil {
return nil, nil, err
}
file, err := r.File(fileSiaPath)
file, err := r.staticFileSet.CachedFileInfo(fileSiaPath, offline, goodForRenew, contracts)
if err != nil {
return nil, nil, err
}
......
package renter
import (
"math"
"os"
"path/filepath"
"strings"
"sync"
"gitlab.com/NebulousLabs/Sia/modules/renter/siadir"
@@ -85,40 +82,7 @@ func (r *Renter) FileList() ([]modules.FileInfo, error) {
}
defer r.tg.Done()
offlineMap, goodForRenewMap, contractsMap := r.managedContractUtilityMaps()
fileList := []modules.FileInfo{}
err := filepath.Walk(r.staticFilesDir, func(path string, info os.FileInfo, err error) error {
// This error is non-nil if filepath.Walk couldn't stat a file or
// folder. We simply ignore missing files.
if os.IsNotExist(err) {
return nil
}
if err != nil {
return err
}
// Skip folders and non-sia files.
if info.IsDir() || filepath.Ext(path) != modules.SiaFileExtension {
return nil
}
// Load the Siafile.
str := strings.TrimSuffix(strings.TrimPrefix(path, r.staticFilesDir), modules.SiaFileExtension)
siaPath, err := modules.NewSiaPath(str)
if err != nil {
return err
}
file, err := r.fileInfo(siaPath, offlineMap, goodForRenewMap, contractsMap)
if os.IsNotExist(err) || err == siafile.ErrUnknownPath {
return nil
}
if err != nil {
return err
}
fileList = append(fileList, file)
return nil
})
return fileList, err
return r.staticFileSet.FileList(offlineMap, goodForRenewMap, contractsMap)
}
// File returns file from siaPath queried by user.
@@ -129,7 +93,7 @@ func (r *Renter) File(siaPath modules.SiaPath) (modules.FileInfo, error) {
}
defer r.tg.Done()
offline, goodForRenew, contracts := r.managedContractUtilityMaps()
return r.fileInfo(siaPath, offline, goodForRenew, contracts)
return r.staticFileSet.FileInfo(siaPath, offline, goodForRenew, contracts)
}
// RenameFile takes an existing file and changes the nickname. The original
@@ -185,54 +149,6 @@ func (r *Renter) SetFileStuck(siaPath modules.SiaPath, stuck bool) error {
return entry.SetAllStuck(stuck)
}
// fileInfo returns information on a siafile. As a performance optimization,
// fileInfo takes the maps returned by renter.managedContractUtilityMaps so
// that they only need to be built once when querying many files at once.
func (r *Renter) fileInfo(siaPath modules.SiaPath, offline map[string]bool, goodForRenew map[string]bool, contracts map[string]modules.RenterContract) (modules.FileInfo, error) {
// Get the file and its contracts
entry, err := r.staticFileSet.Open(siaPath)
if err != nil {
return modules.FileInfo{}, err
}
defer entry.Close()
// Build the FileInfo
var onDisk bool
localPath := entry.LocalPath()
if localPath != "" {
_, err = os.Stat(localPath)
onDisk = err == nil
}
redundancy := entry.Redundancy(offline, goodForRenew)
health, stuckHealth, numStuckChunks := entry.Health(offline, goodForRenew)
fileInfo := modules.FileInfo{
AccessTime: entry.AccessTime(),
Available: redundancy >= 1,
ChangeTime: entry.ChangeTime(),
CipherType: entry.MasterKey().Type().String(),
CreateTime: entry.CreateTime(),
Expiration: entry.Expiration(contracts),
Filesize: entry.Size(),
Health: health,
LocalPath: localPath,
MaxHealth: math.Max(health, stuckHealth),
MaxHealthPercent: entry.HealthPercentage(math.Max(health, stuckHealth)),
ModTime: entry.ModTime(),
NumStuckChunks: numStuckChunks,
OnDisk: onDisk,
Recoverable: onDisk || redundancy >= 1,
Redundancy: redundancy,
Renewing: true,
SiaPath: r.staticFileSet.SiaPath(entry),
Stuck: numStuckChunks > 0,
StuckHealth: stuckHealth,
UploadedBytes: entry.UploadedBytes(),
UploadProgress: entry.UploadProgress(),
}
return fileInfo, nil
}
// fileToSiaFile converts a legacy file to a SiaFile. Fields that can't be
// populated using the legacy file remain blank.
func (r *Renter) fileToSiaFile(f *file, repairPath string, oldContracts []modules.RenterContract) (*siafile.SiaFileSetEntry, error) {
......
@@ -12,7 +12,6 @@ import (
"gitlab.com/NebulousLabs/Sia/modules"
"gitlab.com/NebulousLabs/Sia/modules/renter/siafile"
"gitlab.com/NebulousLabs/Sia/types"
"gitlab.com/NebulousLabs/errors"
"gitlab.com/NebulousLabs/fastrand"
"gitlab.com/NebulousLabs/writeaheadlog"
)
@@ -136,49 +135,12 @@ func TestFileNumChunks(t *testing.T) {
}
}
// TestFileUploadedBytes tests that uploadedBytes() returns a value equal to
// the number of sectors stored via contract times the size of each sector.
func TestFileUploadedBytes(t *testing.T) {
// ensure that a piece fits within a sector
rsc, _ := siafile.NewRSCode(1, 3)
f, err := newFileTesting(t.Name(), newTestingWal(), rsc, 1000, 0777, "")
if err != nil {
t.Fatal(err)
}
for i := uint64(0); i < 4; i++ {
err := f.AddPiece(types.SiaPublicKey{}, uint64(0), i, crypto.Hash{})
if err != nil {
t.Fatal(err)
}
}
if f.UploadedBytes() != 4*modules.SectorSize {
t.Errorf("expected uploadedBytes to be 8, got %v", f.UploadedBytes())
}
}
// TestFileUploadProgressPinning verifies that uploadProgress() returns at most
// 100%, even if more pieces have been uploaded.
func TestFileUploadProgressPinning(t *testing.T) {
rsc, _ := siafile.NewRSCode(1, 1)
f, err := newFileTesting(t.Name(), newTestingWal(), rsc, 4, 0777, "")
if err != nil {
t.Fatal(err)
}
for i := uint64(0); i < 2; i++ {
err1 := f.AddPiece(types.SiaPublicKey{Key: []byte{byte(0)}}, uint64(0), i, crypto.Hash{})
err2 := f.AddPiece(types.SiaPublicKey{Key: []byte{byte(1)}}, uint64(0), i, crypto.Hash{})
if err := errors.Compose(err1, err2); err != nil {
t.Fatal(err)
}
}
if f.UploadProgress() != 100 {
t.Fatal("expected uploadProgress to report 100% but was", f.UploadProgress())
}
}
// TestFileRedundancy tests that redundancy is correctly calculated for files
// with varying number of filecontracts and erasure code settings.
func TestFileRedundancy(t *testing.T) {
if testing.Short() {
t.SkipNow()
}
nDatas := []int{1, 2, 10}
neverOffline := make(map[string]bool)
goodForRenew := make(map[string]bool)
@@ -463,56 +425,6 @@ func TestFileHealth(t *testing.T) {
}
}
// TestFileExpiration probes the expiration method of the file type.
func TestFileExpiration(t *testing.T) {
if testing.Short() {
t.SkipNow()
}
rsc, _ := siafile.NewRSCode(1, 2)
f, err := newFileTesting(t.Name(), newTestingWal(), rsc, 1000, 0777, "")
if err != nil {
t.Fatal(err)
}
contracts := make(map[string]modules.RenterContract)
if f.Expiration(contracts) != 0 {
t.Error("file with no pieces should report as having no time remaining")
}
// Create 3 public keys
pk1 := types.SiaPublicKey{Key: []byte{0}}
pk2 := types.SiaPublicKey{Key: []byte{1}}
pk3 := types.SiaPublicKey{Key: []byte{2}}
// Add a piece for each key to the file.
err1 := f.AddPiece(pk1, 0, 0, crypto.Hash{})
err2 := f.AddPiece(pk2, 0, 1, crypto.Hash{})
err3 := f.AddPiece(pk3, 0, 2, crypto.Hash{})
if err := errors.Compose(err1, err2, err3); err != nil {
t.Fatal(err)
}
// Add a contract.
fc := modules.RenterContract{}
fc.EndHeight = 100
contracts[pk1.String()] = fc
if f.Expiration(contracts) != 100 {
t.Error("file did not report lowest WindowStart")
}
// Add a contract with a lower WindowStart.
fc.EndHeight = 50
contracts[pk2.String()] = fc
if f.Expiration(contracts) != 50 {
t.Error("file did not report lowest WindowStart")
}
// Add a contract with a higher WindowStart.
fc.EndHeight = 75
contracts[pk3.String()] = fc
if f.Expiration(contracts) != 50 {
t.Error("file did not report lowest WindowStart")
}
}
// TestRenterFileListLocalPath verifies that FileList() returns the correct
// local path information for an uploaded file.
func TestRenterFileListLocalPath(t *testing.T) {
......
@@ -493,7 +493,6 @@ func (r *Renter) managedRenterContractsAndUtilities(entrys []*siafile.SiaFileSet
r.log.Debugln("WARN: Could not update used hosts:", err)
}
}
// Build 2 maps that map every pubkey to its offline and goodForRenew
// status.
contracts = make(map[string]modules.RenterContract)
@@ -510,6 +509,10 @@ func (r *Renter) managedRenterContractsAndUtilities(entrys []*siafile.SiaFileSet
offline[pk.String()] = r.hostContractor.IsOffline(pk)
contracts[pk.String()] = contract
}
// Update the cached expiration of the siafiles.
for _, e := range entrys {
_ = e.Expiration(contracts)
}
return offline, goodForRenew, contracts
}
......
@@ -39,6 +39,10 @@ const (
// threadDepth is how deep the ThreadType will track calling files and
// calling lines
threadDepth = 3
// fileListRoutines is the number of goroutines used in FileList to load
// siafile metadata from disk.
fileListRoutines = 20
)
var (
......
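The new fileListRoutines constant bounds FileList at 20 concurrent metadata loads. The loader itself lives in SiaFileSet.FileList and is not shown in this diff; the snippet below is only a sketch of the bounded worker-pool pattern the constant implies, with loadCachedInfo standing in as a hypothetical per-file loader.

// Sketch only: loadCachedInfo is a hypothetical stand-in for the per-file
// metadata loader inside SiaFileSet.FileList.
func listFileInfos(paths []string, loadCachedInfo func(string) (modules.FileInfo, error)) ([]modules.FileInfo, error) {
	pathChan := make(chan string, len(paths))
	for _, path := range paths {
		pathChan <- path
	}
	close(pathChan)

	var mu sync.Mutex
	var infos []modules.FileInfo
	var firstErr error
	var wg sync.WaitGroup
	for i := 0; i < fileListRoutines; i++ {
		wg.Add(1)
		go func() {
			defer wg.Done()
			for path := range pathChan {
				fi, err := loadCachedInfo(path)
				mu.Lock()
				if err != nil && firstErr == nil {
					firstErr = err
				} else if err == nil {
					infos = append(infos, fi)
				}
				mu.Unlock()
			}
		}()
	}
	wg.Wait()
	return infos, firstErr
}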
@@ -54,7 +54,7 @@ func marshalErasureCoder(ec modules.ErasureCoder) ([4]byte, [8]byte) {
}
// marshalMetadata marshals the metadata of the SiaFile using json encoding.
func marshalMetadata(md metadata) ([]byte, error) {
func marshalMetadata(md Metadata) ([]byte, error) {
return json.Marshal(md)
}
@@ -153,7 +153,7 @@ func unmarshalErasureCoder(ecType [4]byte, ecParams [8]byte) (modules.ErasureCod
}
// unmarshalMetadata unmarshals the json encoded metadata of the SiaFile.
func unmarshalMetadata(raw []byte) (md metadata, err error) {
func unmarshalMetadata(raw []byte) (md Metadata, err error) {
err = json.Unmarshal(raw, &md)
// We also need to create the erasure coder object.
......
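Exporting the type as Metadata does not change the on-disk format: the JSON encoding is driven by the struct's json tags, not the Go identifier, so siafiles written before the rename still decode. A quick round-trip illustration (field values arbitrary):

md := Metadata{CachedRedundancy: 3.0, CachedUploadProgress: 100}
raw, err := json.Marshal(md) // what marshalMetadata does internally
if err != nil {
	// handle error
}
var decoded Metadata
if err := json.Unmarshal(raw, &decoded); err != nil {
	// handle error
}
// decoded.CachedRedundancy == 3.0. A real load additionally calls
// unmarshalErasureCoder to rebuild the unexported staticErasureCode field,
// which plain json.Unmarshal cannot restore.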
@@ -8,6 +8,7 @@ import (
"gitlab.com/NebulousLabs/Sia/crypto"
"gitlab.com/NebulousLabs/Sia/modules"
"gitlab.com/NebulousLabs/Sia/types"
"gitlab.com/NebulousLabs/errors"
"gitlab.com/NebulousLabs/fastrand"
"gitlab.com/NebulousLabs/writeaheadlog"
@@ -18,8 +19,8 @@ type (
// siafiles even after renaming them.
SiafileUID string
// metadata is the metadata of a SiaFile and is JSON encoded.
metadata struct {
// Metadata is the metadata of a SiaFile and is JSON encoded.
Metadata struct {
StaticUniqueID SiafileUID `json:"uniqueid"` // unique identifier for file
StaticPagesPerChunk uint8 `json:"pagesperchunk"` // number of pages reserved for storing a chunk.
@@ -40,6 +41,39 @@ type (
AccessTime time.Time `json:"accesstime"` // time of last access
CreateTime time.Time `json:"createtime"` // time of file creation
// Cached fields. These fields are only meant to be used to create FileInfos
// for file-related API endpoints. There is no guarantee that they are
// up-to-date, neither in memory nor on disk. Updates to
// these fields aren't persisted immediately. Instead they will only be
// persisted whenever another method persists the metadata or when the SiaFile
// is closed.
//
// CachedRedundancy is the redundancy of the file on the network and is
// updated within the 'Redundancy' method which is periodically called by the
// repair code.
//
// CachedHealth is the health of the file on the network and is also
// periodically updated by the health check loop whenever 'Health' is called.
//
// CachedStuckHealth is the health of the stuck chunks of the file. It is
// updated by the health check loop.
//
// CachedExpiration is the lowest height at which any of the file's contracts
// will expire. It is updated whenever 'Expiration' is called, which the
// contract utility code does periodically for all open siafiles.
//
// CachedUploadedBytes is the number of bytes of the file that have been
// uploaded to the network so far. It is updated every time a piece is added
// to the siafile.
//
// CachedUploadProgress is the upload progress of the file and is updated
// every time a piece is added to the siafile.
//
CachedRedundancy float64 `json:"cachedredundancy"`
CachedHealth float64 `json:"cachedhealth"`
CachedStuckHealth float64 `json:"cachedstuckhealth"`
CachedExpiration types.BlockHeight `json:"cachedexpiration"`
CachedUploadedBytes uint64 `json:"cacheduploadedbytes"`
CachedUploadProgress float64 `json:"cacheduploadprogress"`
// Repair loop fields
//
// Health is the worst health of the file's unstuck chunks and
@@ -179,6 +213,13 @@ func (sf *SiaFile) MasterKey() crypto.CipherKey {
return sk
}
// Metadata returns the metadata of the SiaFile.
func (sf *SiaFile) Metadata() Metadata {
sf.mu.RLock()
defer sf.mu.RUnlock()
return sf.staticMetadata
}
// Mode returns the FileMode of the SiaFile.
func (sf *SiaFile) Mode() os.FileMode {
sf.mu.RLock()
......
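The exported Metadata() accessor plus the cached fields are what allow a FileInfo to be assembled without opening contracts or scanning chunk data. The CachedFileInfo implementation itself is not part of this diff; the sketch below merely mirrors the field mapping of the removed renter.fileInfo above, and omits fields such as SiaPath, LocalPath and OnDisk that need the file set or an os.Stat.

// Sketch: build a FileInfo purely from cached metadata.
func cachedInfo(sf *SiaFile) modules.FileInfo {
	md := sf.Metadata()
	maxHealth := math.Max(md.CachedHealth, md.CachedStuckHealth)
	return modules.FileInfo{
		AccessTime:     md.AccessTime,
		Available:      md.CachedRedundancy >= 1,
		CreateTime:     md.CreateTime,
		Expiration:     md.CachedExpiration,
		Filesize:       uint64(md.FileSize),
		Health:         md.CachedHealth,
		MaxHealth:      maxHealth,
		Redundancy:     md.CachedRedundancy,
		StuckHealth:    md.CachedStuckHealth,
		UploadedBytes:  md.CachedUploadedBytes,
		UploadProgress: md.CachedUploadProgress,
	}
}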
@@ -33,6 +33,12 @@ func LoadSiaFile(path string, wal *writeaheadlog.WAL) (*SiaFile, error) {
return loadSiaFile(path, wal, modules.ProdDependencies)
}
// LoadSiaFileMetadata is a wrapper for loadSiaFileMetadata that uses the
// production dependencies.
func LoadSiaFileMetadata(path string) (Metadata, error) {
return loadSiaFileMetadata(path, modules.ProdDependencies)
}
// applyUpdates applies a number of writeaheadlog updates to the corresponding
// SiaFile. This method can apply updates from different SiaFiles and should
// only be run before the SiaFiles are loaded from disk right after the startup
@@ -124,6 +130,27 @@ func loadSiaFile(path string, wal *writeaheadlog.WAL, deps modules.Dependencies)
return sf, nil
}
// loadSiaFileMetadata loads only the metadata of a SiaFile from disk.
func loadSiaFileMetadata(path string, deps modules.Dependencies) (md Metadata, err error) {
// Open the file.
f, err := deps.Open(path)
if err != nil {
return Metadata{}, err
}
defer f.Close()
// Load the metadata.
decoder := json.NewDecoder(f)
if err = decoder.Decode(&md); err != nil {
return
}
// Create the erasure coder.
md.staticErasureCode, err = unmarshalErasureCoder(md.StaticErasureCodeType, md.StaticErasureCodeParams)
if err != nil {
return
}
return
}
// readAndApplyDeleteUpdate reads the delete update and applies it. This helper
// assumes that the file is not open
func readAndApplyDeleteUpdate(deps modules.Dependencies, update writeaheadlog.Update) error {
......
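LoadSiaFileMetadata lets callers read just the JSON metadata header of a siafile, skipping the pubkey table and chunk data entirely, which is what makes listing many files cheap. A usage sketch (the path is hypothetical):

// Read only the metadata; the full SiaFile, including host keys and
// chunks, is never loaded into memory.
md, err := siafile.LoadSiaFileMetadata("/renter/files/movie.sia")
if err != nil {
	// handle error
}
fmt.Printf("health %v, redundancy %v, %v%% uploaded\n",
	md.CachedHealth, md.CachedRedundancy, md.CachedUploadProgress)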
@@ -49,7 +49,7 @@ func (sfs *SiaFileSet) NewFromLegacyData(fd FileData) (*SiaFileSetEntry, error)
return &SiaFileSetEntry{}, err
}
file := &SiaFile{
staticMetadata: metadata{
staticMetadata: Metadata{
AccessTime: currentTime,
ChunkOffset: defaultReservedMDPages * pageSize,
ChangeTime: currentTime,
@@ -110,5 +110,9 @@ func (sfs *SiaFileSet) NewFromLegacyData(fd FileData) (*SiaFileSetEntry, error)
siaFileSetEntry: entry,
threadUID: threadUID,
}
// Update the cached fields for progress and uploaded bytes.
_, _ = file.UploadProgressAndBytes()
return sfse, errors.AddContext(file.saveFile(), "unable to save file")
}
@@ -229,6 +229,11 @@ func TestNewFile(t *testing.T) {
chunks = append(chunks, c)
}
// Save the SiaFile to make sure cached fields are persisted too.
if err := sf.saveFile(); err != nil {
t.Fatal(err)
}
// Open the file.
f, err := os.OpenFile(sf.siaFilePath, os.O_RDWR, 777)
if err != nil {
......
@@ -39,7 +39,7 @@ type (
// size of the staticMetadata on disk should always be a multiple of 4kib.
// The staticMetadata is also the only part of the file that is JSON encoded
// and can therefore be easily extended.
staticMetadata metadata
staticMetadata Metadata
// pubKeyTable stores the public keys of the hosts this file's pieces are uploaded to.
// Since multiple pieces from different chunks might be uploaded to the same host, this
@@ -136,7 +136,7 @@ func New(siaPath modules.SiaPath, siaFilePath, source string, wal *writeaheadlog
currentTime := time.Now()
ecType, ecParams := marshalErasureCoder(erasureCode)
file := &SiaFile{
staticMetadata: metadata{
staticMetadata: Metadata{
AccessTime: currentTime,
ChunkOffset: defaultReservedMDPages * pageSize,
ChangeTime: currentTime,
@@ -167,6 +167,13 @@ func New(siaPath modules.SiaPath, siaFilePath, source string, wal *writeaheadlog
for i := range file.chunks {
file.chunks[i].Pieces = make([][]piece, erasureCode.NumPieces())
}
// Init cached fields for 0-Byte files.
if file.staticMetadata.FileSize == 0 {
file.staticMetadata.CachedHealth = 0
file.staticMetadata.CachedStuckHealth = 0
file.staticMetadata.CachedRedundancy = float64(erasureCode.NumPieces()) / float64(erasureCode.MinPieces())
file.staticMetadata.CachedUploadProgress = 100
}
// Save file.
return file, file.saveFile()
}
@@ -225,6 +232,9 @@ func (sf *SiaFile) AddPiece(pk types.SiaPublicKey, chunkIndex, pieceIndex uint64
return errors.New("can't add piece to deleted file")
}
// Update cache.
defer sf.UploadProgressAndBytes()
// Get the index of the host in the public key table.
tableIndex := -1
for i, hpk := range sf.pubKeyTable {
@@ -359,12 +369,20 @@ func (sf *SiaFile) ErasureCode() modules.ErasureCoder {
return sf.staticMetadata.staticErasureCode
}
// Expiration returns the lowest height at which any of the file's contracts
// will expire.
// Save saves the entire file to disk.
func (sf *SiaFile) Save() error {
sf.mu.Lock()
defer sf.mu.Unlock()
return sf.saveFile()
}
// Expiration updates CachedExpiration with the lowest height at which any of
// the file's contracts will expire and returns the new value.
func (sf *SiaFile) Expiration(contracts map[string]modules.RenterContract) types.BlockHeight {
sf.mu.RLock()
defer sf.mu.RUnlock()
sf.mu.Lock()
defer sf.mu.Unlock()
if len(sf.pubKeyTable) == 0 {
sf.staticMetadata.CachedExpiration = 0
return 0
}
@@ -378,6 +396,7 @@ func (sf *SiaFile) Expiration(contracts map[string]modules.RenterContract) types
lowest = contract.EndHeight
}
}
sf.staticMetadata.CachedExpiration = lowest
return lowest
}
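Note the lock change from RLock to Lock above: Expiration is now a write operation because it stores CachedExpiration as a side effect. Its semantics, echoing the TestFileExpiration case removed earlier, pick the lowest EndHeight among contracts whose hosts hold pieces of the file. A small usage sketch, assuming pk1..pk3 are host keys that already hold pieces of sf:

contracts := map[string]modules.RenterContract{
	pk1.String(): {EndHeight: 100},
	pk2.String(): {EndHeight: 50},
	pk3.String(): {EndHeight: 75},
}
lowest := sf.Expiration(contracts) // == 50, also stored in CachedExpiration
_ = lowest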
@@ -388,13 +407,20 @@ func (sf *SiaFile) Expiration(contracts map[string]modules.RenterContract) types
//
// health = 0 is full redundancy, health <= 1 is recoverable, health > 1 needs
// to be repaired from disk
func (sf *SiaFile) Health(offline map[string]bool, goodForRenew map[string]bool) (float64, float64, uint64) {
func (sf *SiaFile) Health(offline map[string]bool, goodForRenew map[string]bool) (h float64, sh float64, nsc uint64) {
numPieces := float64(sf.staticMetadata.staticErasureCode.NumPieces())
minPieces := float64(sf.staticMetadata.staticErasureCode.MinPieces())
worstHealth := 1 - ((0 - minPieces) / (numPieces - minPieces))
sf.mu.RLock()
defer sf.mu.RUnlock()
sf.mu.Lock()
defer sf.mu.Unlock()
// Update the cache.
defer func() {
sf.staticMetadata.CachedHealth = h
sf.staticMetadata.CachedStuckHealth = sh
}()
// Check if siafile is deleted
if sf.deleted {
// Don't return health information of a deleted file to prevent
@@ -449,17 +475,6 @@ func (sf *SiaFile) Health(offline map[string]bool, goodForRenew map[string]bool)
return health, stuckHealth, numStuckChunks
}
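For intuition on the worstHealth formula above, here is a worked example for a hypothetical 10-of-30 erasure code (minPieces = 10, numPieces = 30):

minPieces, numPieces := 10.0, 30.0
worstHealth := 1 - ((0 - minPieces) / (numPieces - minPieces)) // 1.5
// The same interpolation with g good pieces,
// 1 - (g-minPieces)/(numPieces-minPieces), gives 0 at g == 30 (full
// redundancy) and exactly 1 at g == 10 (just recoverable), matching the
// doc comment: health <= 1 is recoverable, health > 1 needs repair.
_ = worstHealth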
// HealthPercentage returns the health in a more human understandable format out
// of 100%
func (sf *SiaFile) HealthPercentage(health float64) float64 {
sf.mu.Lock()
defer sf.mu.Unlock()
dataPieces := sf.staticMetadata.staticErasureCode.MinPieces()
parityPieces := sf.staticMetadata.staticErasureCode.NumPieces() - dataPieces
worstHealth := 1 + float64(dataPieces)/float64(parityPieces)
return 100 * ((worstHealth - health) / worstHealth)
}
// HostPublicKeys returns all the public keys of hosts the file has ever been
// uploaded to. That means some of those hosts might no longer be in use.
func (sf *SiaFile) HostPublicKeys() (spks []types.SiaPublicKey) {
@@ -602,9 +617,13 @@ func (sf *SiaFile) Pieces(chunkIndex uint64) ([][]Piece, error) {
// unique within a file contract. -1 is returned if the file has size 0. It
// takes two arguments, a map of offline contracts for this file and a map that
// indicates if a contract is goodForRenew.
func (sf *SiaFile) Redundancy(offlineMap map[string]bool, goodForRenewMap map[string]bool) float64 {
sf.mu.RLock()
defer sf.mu.RUnlock()
func (sf *SiaFile) Redundancy(offlineMap map[string]bool, goodForRenewMap map[string]bool) (r float64) {
sf.mu.Lock()
defer sf.mu.Unlock()
// Update the cache.
defer func() {
sf.staticMetadata.CachedRedundancy = r
}()
if sf.staticMetadata.FileSize == 0 {
// TODO change this once tiny files are supported.
if len(sf.chunks) != 1 {
@@ -718,17 +737,6 @@ func (sf *SiaFile) UID() SiafileUID {
return sf.staticMetadata.StaticUniqueID
}
// UploadedBytes indicates how many bytes of the file have been uploaded via
// current file contracts. Note that this is the total number of uploaded
// bytes, including padding and redundancy, so uploadedBytes can return a
// value much larger than the file's original filesize.
func (sf *SiaFile) UploadedBytes() uint64 {
sf.mu.RLock()
defer sf.mu.RUnlock()
uploaded, _ := sf.uploadedBytes()
return uploaded
}
// UpdateUsedHosts updates the 'Used' flag for the entries in the pubKeyTable
// of the SiaFile. The keys of all used hosts should be passed to the method
// and the SiaFile will update the flag for hosts it knows of to 'true' and set
@@ -774,20 +782,6 @@ func (sf *SiaFile) UpdateUsedHosts(used []types.SiaPublicKey) error {
return sf.createAndApplyTransaction(updates...)
}
// UploadProgress indicates what percentage of the file has been uploaded based
// on the unique pieces that have been uploaded. Note that a file may be
// Available long before UploadProgress reaches 100%.
func (sf *SiaFile) UploadProgress() float64 {
if sf.Size() == 0 {
return 100
}
desired := sf.NumChunks() * modules.SectorSize * uint64(sf.ErasureCode().NumPieces())
sf.mu.RLock()
defer sf.mu.RUnlock()
_, uploaded := sf.uploadedBytes()
return math.Min(100*(float64(uploaded)/float64(desired)), 100)
}
// defragChunk removes pieces which belong to bad hosts and if that wasn't
// enough to reduce the chunkSize below the maximum size, it will remove
// redundant pieces.
@@ -898,6 +892,24 @@ func (sf *SiaFile) goodPieces(chunkIndex int, offlineMap map[string]bool, goodFo
return numPiecesGoodForRenew, numPiecesGoodForUpload
}
// UploadProgressAndBytes updates the CachedUploadProgress and
// CachedUploadedBytes fields and returns both values: the percentage of the
// file that has been uploaded based on the unique pieces that have been
// uploaded, and the total number of bytes that have been uploaded for the
// file. Note that a file may be Available long before UploadProgress reaches
// 100%.
func (sf *SiaFile) UploadProgressAndBytes() (float64, uint64) {
_, uploaded := sf.uploadedBytes()
if sf.staticMetadata.FileSize == 0 {
// Update cache.
sf.staticMetadata.CachedUploadProgress = 100
return 100, uploaded
}
desired := uint64(len(sf.chunks)) * modules.SectorSize * uint64(sf.staticMetadata.staticErasureCode.NumPieces())
// Update cache.
sf.staticMetadata.CachedUploadProgress = math.Min(100*(float64(uploaded)/float64(desired)), 100)
return sf.staticMetadata.CachedUploadProgress, uploaded
}
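The desired byte count here is numChunks * SectorSize * NumPieces, i.e. full redundancy for every chunk. A worked example, assuming two chunks, a 10-of-30 code, and Sia's usual 4 MiB sectors:

const sectorSize = 4 << 20             // assumed value of modules.SectorSize
desired := uint64(2) * sectorSize * 30 // 2 chunks * 30 pieces = 240 MiB
uploaded := uint64(30) * sectorSize    // 30 pieces (120 MiB) uploaded so far
progress := math.Min(100*float64(uploaded)/float64(desired), 100) // 50
_ = progress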
// uploadedBytes returns the total number of bytes of the file that have been
// uploaded via current file contracts, as well as the number of unique
// uploaded bytes. Note that the total includes padding and redundancy, so
// uploadedBytes can return a value
@@ -921,5 +933,7 @@ func (sf *SiaFile) uploadedBytes() (uint64, uint64) {
unique += modules.SectorSize
}
}
// Update cache.
sf.staticMetadata.CachedUploadedBytes = total
return total, unique
}
@@ -11,6 +11,7 @@ import (
"gitlab.com/NebulousLabs/Sia/crypto"
"gitlab.com/NebulousLabs/Sia/modules"
"gitlab.com/NebulousLabs/Sia/types"
"gitlab.com/NebulousLabs/errors"
"gitlab.com/NebulousLabs/fastrand"
)
@@ -305,6 +306,12 @@ func TestDefragChunk(t *testing.T) {
duration += time.Since(before)
}
// Save the file to disk again to make sure cached fields are persisted.
err = sf.saveFile()
if err != nil {
t.Fatal(err)
}
// Finally load the file from disk again and compare it to the original.
sf2, err := LoadSiaFile(sf.siaFilePath, sf.wal)
if err != nil {
@@ -702,6 +709,9 @@ func TestStuckChunks(t *testing.T) {
// TestUploadedBytes tests that uploadedBytes() returns the expected values for
// total and unique uploaded bytes.
func TestUploadedBytes(t *testing.T) {
if testing.Short() {
t.SkipNow()
}
// Create a new blank test file
f := newBlankTestFile()
// Add multiple pieces to the first pieceSet of the first piece of the first
@@ -714,9 +724,80 @@ func TestUploadedBytes(t *testing.T) {
}
totalBytes, uniqueBytes := f.uploadedBytes()
if totalBytes != 4*modules.SectorSize {
t.Errorf("expected totalBytes to be %v, got %v", 4*modules.SectorSize, f.UploadedBytes())
t.Errorf("expected totalBytes to be %v, got %v", 4*modules.SectorSize, totalBytes)
}
if uniqueBytes != modules.SectorSize {
t.Errorf("expected uploadedBytes to be %v, got %v", modules.SectorSize, f.UploadedBytes())
t.Errorf("expected uploadedBytes to be %v, got %v", modules.SectorSize, uniqueBytes)
}
}
// TestFileUploadProgressPinning verifies that uploadProgress() returns at most
// 100%, even if more pieces have been uploaded.
func TestFileUploadProgressPinning(t *testing.T) {
if testing.Short() {
t.SkipNow()
}
f := newBlankTestFile()
for chunkIndex := uint64(0); chunkIndex < f.NumChunks(); chunkIndex++ {
for pieceIndex := uint64(0); pieceIndex < uint64(f.ErasureCode().NumPieces()); pieceIndex++ {
err1 := f.AddPiece(types.SiaPublicKey{Key: []byte{byte(0)}}, chunkIndex, pieceIndex, crypto.Hash{})
err2 := f.AddPiece(types.SiaPublicKey{Key: []byte{byte(1)}}, chunkIndex, pieceIndex, crypto.Hash{})
if err := errors.Compose(err1, err2); err != nil {
t.Fatal(err)
}
}
}
if f.staticMetadata.CachedUploadProgress != 100 {
t.Fatal("expected uploadProgress to report 100% but was", f.staticMetadata.CachedUploadProgress)