Commit 75810679 authored by Christopher Schinnerl

Add fileinfo caching to SiaFile

parent dd9b3bdb
......@@ -189,45 +189,42 @@ func (r *Renter) SetFileStuck(siaPath modules.SiaPath, stuck bool) error {
// fileInfo takes the maps returned by renter.managedContractUtilityMaps as
// arguments so they can be computed once and reused when building the
// FileInfos of many files at once.
func (r *Renter) fileInfo(siaPath modules.SiaPath, offline map[string]bool, goodForRenew map[string]bool, contracts map[string]modules.RenterContract) (modules.FileInfo, error) {
// Get the file and its contracts
entry, err := r.staticFileSet.Open(siaPath)
// Get the file's metadata and its contracts
md, err := r.staticFileSet.Metadata(siaPath)
if err != nil {
return modules.FileInfo{}, err
}
defer entry.Close()
// Build the FileInfo
var onDisk bool
localPath := entry.LocalPath()
localPath := md.LocalPath
if localPath != "" {
_, err = os.Stat(localPath)
onDisk = err == nil
}
redundancy := entry.Redundancy(offline, goodForRenew)
health, stuckHealth, numStuckChunks := entry.Health(offline, goodForRenew)
fileInfo := modules.FileInfo{
AccessTime: entry.AccessTime(),
Available: redundancy >= 1,
ChangeTime: entry.ChangeTime(),
CipherType: entry.MasterKey().Type().String(),
CreateTime: entry.CreateTime(),
Expiration: entry.Expiration(contracts),
Filesize: entry.Size(),
Health: health,
AccessTime: md.AccessTime,
Available: md.CachedRedundancy >= 1,
ChangeTime: md.ChangeTime,
CipherType: md.StaticMasterKeyType.String(),
CreateTime: md.CreateTime,
Expiration: md.CachedExpiration,
Filesize: uint64(md.FileSize),
Health: md.CachedHealth,
LocalPath: localPath,
MaxHealth: math.Max(health, stuckHealth),
MaxHealthPercent: entry.HealthPercentage(math.Max(health, stuckHealth)),
ModTime: entry.ModTime(),
NumStuckChunks: numStuckChunks,
MaxHealth: math.Max(md.CachedHealth, md.CachedStuckHealth),
MaxHealthPercent: md.HealthPercentage(),
ModTime: md.ModTime,
NumStuckChunks: md.NumStuckChunks,
OnDisk: onDisk,
Recoverable: onDisk || redundancy >= 1,
Redundancy: redundancy,
Recoverable: onDisk || md.CachedRedundancy >= 1,
Redundancy: md.CachedRedundancy,
Renewing: true,
SiaPath: r.staticFileSet.SiaPath(entry),
Stuck: numStuckChunks > 0,
StuckHealth: stuckHealth,
UploadedBytes: entry.UploadedBytes(),
UploadProgress: entry.UploadProgress(),
SiaPath: siaPath,
Stuck: md.NumStuckChunks > 0,
StuckHealth: md.CachedStuckHealth,
UploadedBytes: md.CachedUploadedBytes,
UploadProgress: md.CachedUploadProgress,
}
return fileInfo, nil
......
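Taken together, the hunk above stops opening the full SiaFile and instead builds the FileInfo from the cached Metadata fields. Below is a minimal sketch of the calling pattern the comment describes, computing the contract utility maps once and reusing them across many files; the exact signature of managedContractUtilityMaps is assumed here:

```go
// Sketch only: build FileInfos for many files while computing the contract
// utility maps a single time. managedContractUtilityMaps is assumed to
// return (offline, goodForRenew, contracts) as consumed by fileInfo above.
func (r *Renter) fileInfos(paths []modules.SiaPath) ([]modules.FileInfo, error) {
	offline, goodForRenew, contracts := r.managedContractUtilityMaps()
	infos := make([]modules.FileInfo, 0, len(paths))
	for _, siaPath := range paths {
		fi, err := r.fileInfo(siaPath, offline, goodForRenew, contracts)
		if err != nil {
			return nil, err
		}
		infos = append(infos, fi)
	}
	return infos, nil
}
```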
......@@ -54,7 +54,7 @@ func marshalErasureCoder(ec modules.ErasureCoder) ([4]byte, [8]byte) {
}
// marshalMetadata marshals the metadata of the SiaFile using json encoding.
func marshalMetadata(md metadata) ([]byte, error) {
func marshalMetadata(md Metadata) ([]byte, error) {
return json.Marshal(md)
}
......@@ -153,7 +153,7 @@ func unmarshalErasureCoder(ecType [4]byte, ecParams [8]byte) (modules.ErasureCod
}
// unmarshalMetadata unmarshals the json encoded metadata of the SiaFile.
func unmarshalMetadata(raw []byte) (md metadata, err error) {
func unmarshalMetadata(raw []byte) (md Metadata, err error) {
err = json.Unmarshal(raw, &md)
// We also need to create the erasure coder object.
......
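Note that renaming metadata to Metadata only changes Go visibility; the JSON wire format is untouched because every field carries an explicit json tag. A quick stand-alone round trip in the same spirit, where mini is a made-up stand-in for the real struct:

```go
package main

import (
	"encoding/json"
	"fmt"
	"time"
)

// mini is a pared-down stand-in for siafile.Metadata with the same style of
// explicit json tags; illustration only.
type mini struct {
	AccessTime   time.Time `json:"accesstime"`
	CachedHealth float64   `json:"cachedhealth"`
}

func main() {
	raw, err := json.Marshal(mini{AccessTime: time.Now(), CachedHealth: 0.5})
	if err != nil {
		panic(err)
	}
	var out mini
	if err := json.Unmarshal(raw, &out); err != nil {
		panic(err)
	}
	fmt.Println(string(raw))
}
```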
......@@ -2,12 +2,14 @@ package siafile
import (
"encoding/hex"
"math"
"os"
"path/filepath"
"time"
"gitlab.com/NebulousLabs/Sia/crypto"
"gitlab.com/NebulousLabs/Sia/modules"
"gitlab.com/NebulousLabs/Sia/types"
"gitlab.com/NebulousLabs/errors"
"gitlab.com/NebulousLabs/fastrand"
"gitlab.com/NebulousLabs/writeaheadlog"
......@@ -18,8 +20,8 @@ type (
// siafiles even after renaming them.
SiafileUID string
// metadata is the metadata of a SiaFile and is JSON encoded.
metadata struct {
// Metadata is the metadata of a SiaFile and is JSON encoded.
Metadata struct {
StaticUniqueID SiafileUID `json:"uniqueid"` // unique identifier for file
StaticPagesPerChunk uint8 `json:"pagesperchunk"` // number of pages reserved for storing a chunk.
......@@ -40,6 +42,37 @@ type (
AccessTime time.Time `json:"accesstime"` // time of last access
CreateTime time.Time `json:"createtime"` // time of file creation
// Cached fields. These fields are only meant to be used to create FileInfos
// for file-related API endpoints. There is no guarantee that they are
// up-to-date, neither in memory nor on disk, since updates to these fields
// aren't persisted immediately.
//
// CachedRedundancy is the redundancy of the file on the network and is
// updated within the 'Redundancy' method which is periodically called by the
// repair code.
//
// CachedHealth is the health of the file on the network and is also
// periodically updated by the health check loop whenever 'Health' is called.
//
// CachedStuckHealth is the health of the stuck chunks of the file. It is
// updated by the health check loop.
//
// CachedExpiration is the lowest height at which any of the file's contracts
// will expire. It is updated periodically by the health check loop whenever
// 'UpdateExpiration' is called.
//
// CachedUploadedBytes is the number of bytes of the file that have been
// uploaded to the network so far. It is updated every time a piece is added
// to the siafile.
//
// CachedUploadProgress is the upload progress of the file and is updated
// every time a piece is added to the siafile.
//
CachedRedundancy float64 `json:"cachedredundancy"`
CachedHealth float64 `json:"cachedhealth"`
CachedStuckHealth float64 `json:"cachedstuckhealth"`
CachedExpiration types.BlockHeight `json:"cachedexpiration"`
CachedUploadedBytes uint64 `json:"cacheduploadedbytes"`
CachedUploadProgress float64 `json:"cacheduploadprogress"`
// Repair loop fields
//
// Health is the worst health of the file's unstuck chunks and
......@@ -153,6 +186,16 @@ func (sf *SiaFile) ChunkSize() uint64 {
return sf.staticChunkSize()
}
// HealthPercentage returns the health in a more human-understandable format,
// out of 100%
func (md Metadata) HealthPercentage() float64 {
health := math.Max(md.CachedHealth, md.CachedStuckHealth)
dataPieces := md.staticErasureCode.MinPieces()
parityPieces := md.staticErasureCode.NumPieces() - dataPieces
worstHealth := 1 + float64(dataPieces)/float64(parityPieces)
return 100 * ((worstHealth - health) / worstHealth)
}
// LastHealthCheckTime returns the LastHealthCheckTime timestamp of the file
func (sf *SiaFile) LastHealthCheckTime() time.Time {
sf.mu.Lock()
......
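HealthPercentage maps the health range [0, worstHealth] onto [100%, 0%]. For a hypothetical 10-of-30 erasure code (10 data pieces, 20 parity pieces), worstHealth = 1 + 10/20 = 1.5, so a health of 0 reads as 100%, 0.75 as 50%, and 1.5 as 0%. The same arithmetic as a stand-alone sketch:

```go
package main

import "fmt"

// healthPercentage mirrors Metadata.HealthPercentage above for a code with
// the given piece counts; illustration only.
func healthPercentage(health float64, dataPieces, parityPieces int) float64 {
	worstHealth := 1 + float64(dataPieces)/float64(parityPieces)
	return 100 * ((worstHealth - health) / worstHealth)
}

func main() {
	for _, h := range []float64{0, 0.75, 1.5} {
		fmt.Printf("health %.2f -> %.0f%%\n", h, healthPercentage(h, 10, 20))
	}
}
```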
......@@ -33,6 +33,12 @@ func LoadSiaFile(path string, wal *writeaheadlog.WAL) (*SiaFile, error) {
return loadSiaFile(path, wal, modules.ProdDependencies)
}
// LoadSiaFileMetadata is a wrapper for loadSiaFileMetadata that uses the
// production dependencies.
func LoadSiaFileMetadata(path string) (Metadata, error) {
return loadSiaFileMetadata(path, modules.ProdDependencies)
}
// applyUpdates applies a number of writeaheadlog updates to the corresponding
// SiaFile. This method can apply updates from different SiaFiles and should
// only be run before the SiaFiles are loaded from disk right after the startup
......@@ -124,6 +130,20 @@ func loadSiaFile(path string, wal *writeaheadlog.WAL, deps modules.Dependencies)
return sf, nil
}
// loadSiaFileMetadata loads only the metadata of a SiaFile from disk.
func loadSiaFileMetadata(path string, deps modules.Dependencies) (md Metadata, err error) {
// Open the file.
f, err := deps.Open(path)
if err != nil {
return Metadata{}, err
}
defer f.Close()
// Load the metadata.
decoder := json.NewDecoder(f)
err = decoder.Decode(&md)
return
}
// readAndApplyDeleteUpdate reads the delete update and applies it. This helper
// assumes that the file is not open
func readAndApplyDeleteUpdate(deps modules.Dependencies, update writeaheadlog.Update) error {
......
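A hypothetical use of the new loader: read just the JSON-encoded metadata of a siafile without constructing a full SiaFile or touching the WAL. The path below is made up:

```go
package main

import (
	"fmt"
	"log"

	"gitlab.com/NebulousLabs/Sia/modules/renter/siafile"
)

func main() {
	// Loads only the metadata; no chunks are parsed and no WAL is needed.
	md, err := siafile.LoadSiaFileMetadata("/home/user/renter/siafiles/movie.sia")
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println("cached health:    ", md.CachedHealth)
	fmt.Println("cached redundancy:", md.CachedRedundancy)
}
```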
......@@ -49,7 +49,7 @@ func (sfs *SiaFileSet) NewFromLegacyData(fd FileData) (*SiaFileSetEntry, error)
return &SiaFileSetEntry{}, err
}
file := &SiaFile{
staticMetadata: metadata{
staticMetadata: Metadata{
AccessTime: currentTime,
ChunkOffset: defaultReservedMDPages * pageSize,
ChangeTime: currentTime,
......
......@@ -39,7 +39,7 @@ type (
// size of the staticMetadata on disk should always be a multiple of 4kib.
// The staticMetadata is also the only part of the file that is JSON encoded
// and can therefore be easily extended.
staticMetadata metadata
staticMetadata Metadata
// pubKeyTable stores the public keys of the hosts this file's pieces are uploaded to.
// Since multiple pieces from different chunks might be uploaded to the same host, this
......@@ -136,7 +136,7 @@ func New(siaPath modules.SiaPath, siaFilePath, source string, wal *writeaheadlog
currentTime := time.Now()
ecType, ecParams := marshalErasureCoder(erasureCode)
file := &SiaFile{
staticMetadata: metadata{
staticMetadata: Metadata{
AccessTime: currentTime,
ChunkOffset: defaultReservedMDPages * pageSize,
ChangeTime: currentTime,
......@@ -225,6 +225,9 @@ func (sf *SiaFile) AddPiece(pk types.SiaPublicKey, chunkIndex, pieceIndex uint64
return errors.New("can't add piece to deleted file")
}
// Update cache.
defer sf.updateUploadProgressAndBytes()
// Get the index of the host in the public key table.
tableIndex := -1
for i, hpk := range sf.pubKeyTable {
......@@ -359,13 +362,14 @@ func (sf *SiaFile) ErasureCode() modules.ErasureCoder {
return sf.staticMetadata.staticErasureCode
}
// Expiration returns the lowest height at which any of the file's contracts
// will expire.
func (sf *SiaFile) Expiration(contracts map[string]modules.RenterContract) types.BlockHeight {
// UpdateExpiration updates CachedExpiration with the lowest height at which any
// of the file's contracts will expire.
func (sf *SiaFile) UpdateExpiration(contracts map[string]modules.RenterContract) {
sf.mu.RLock()
defer sf.mu.RUnlock()
if len(sf.pubKeyTable) == 0 {
return 0
sf.staticMetadata.CachedExpiration = 0
return
}
lowest := ^types.BlockHeight(0)
......@@ -378,7 +382,7 @@ func (sf *SiaFile) Expiration(contracts map[string]modules.RenterContract) types
lowest = contract.EndHeight
}
}
return lowest
sf.staticMetadata.CachedExpiration = lowest
}
// Health calculates the health of the file to be used in determining repair
......@@ -388,13 +392,20 @@ func (sf *SiaFile) Expiration(contracts map[string]modules.RenterContract) types
//
// health = 0 is full redundancy, health <= 1 is recoverable, health > 1 needs
// to be repaired from disk
func (sf *SiaFile) Health(offline map[string]bool, goodForRenew map[string]bool) (float64, float64, uint64) {
func (sf *SiaFile) Health(offline map[string]bool, goodForRenew map[string]bool) (h float64, sh float64, nsc uint64) {
numPieces := float64(sf.staticMetadata.staticErasureCode.NumPieces())
minPieces := float64(sf.staticMetadata.staticErasureCode.MinPieces())
worstHealth := 1 - ((0 - minPieces) / (numPieces - minPieces))
sf.mu.RLock()
defer sf.mu.RUnlock()
// Update the cache.
defer func() {
sf.staticMetadata.CachedHealth = h
sf.staticMetadata.CachedStuckHealth = sh
}()
// Check if siafile is deleted
if sf.deleted {
// Don't return health information of a deleted file to prevent
......@@ -449,17 +460,6 @@ func (sf *SiaFile) Health(offline map[string]bool, goodForRenew map[string]bool)
return health, stuckHealth, numStuckChunks
}
// HealthPercentage returns the health in a more human understandable format out
// of 100%
func (sf *SiaFile) HealthPercentage(health float64) float64 {
sf.mu.Lock()
defer sf.mu.Unlock()
dataPieces := sf.staticMetadata.staticErasureCode.MinPieces()
parityPieces := sf.staticMetadata.staticErasureCode.NumPieces() - dataPieces
worstHealth := 1 + float64(dataPieces)/float64(parityPieces)
return 100 * ((worstHealth - health) / worstHealth)
}
// HostPublicKeys returns all the public keys of hosts the file has ever been
// uploaded to. That means some of those hosts might no longer be in use.
func (sf *SiaFile) HostPublicKeys() (spks []types.SiaPublicKey) {
......@@ -602,9 +602,13 @@ func (sf *SiaFile) Pieces(chunkIndex uint64) ([][]Piece, error) {
// unique within a file contract. -1 is returned if the file has size 0. It
// takes two arguments, a map of offline contracts for this file and a map that
// indicates if a contract is goodForRenew.
func (sf *SiaFile) Redundancy(offlineMap map[string]bool, goodForRenewMap map[string]bool) float64 {
func (sf *SiaFile) Redundancy(offlineMap map[string]bool, goodForRenewMap map[string]bool) (r float64) {
sf.mu.RLock()
defer sf.mu.RUnlock()
// Update the cache.
defer func() {
sf.staticMetadata.CachedRedundancy = r
}()
if sf.staticMetadata.FileSize == 0 {
// TODO change this once tiny files are supported.
if len(sf.chunks) != 1 {
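Health and Redundancy now double as cache writers: the return value is named and a deferred closure copies it into staticMetadata on every return path. Because deferred calls run last-in-first-out, the cache write registered after RUnlock executes first, i.e. while the lock is still held. A distilled sketch of the pattern, with the computation replaced by a placeholder:

```go
// Distilled caching pattern used by Health and Redundancy above; the
// placeholder value stands in for the real computation.
func (sf *SiaFile) redundancyWithCache() (r float64) {
	sf.mu.RLock()
	defer sf.mu.RUnlock()
	// Registered after RUnlock, so it runs first (defers are LIFO): the
	// cached field is updated under the lock, on every return path.
	defer func() {
		sf.staticMetadata.CachedRedundancy = r
	}()
	r = 1.5 // placeholder for the real computation
	return r
}
```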
......@@ -718,17 +722,6 @@ func (sf *SiaFile) UID() SiafileUID {
return sf.staticMetadata.StaticUniqueID
}
// UploadedBytes indicates how many bytes of the file have been uploaded via
// current file contracts. Note that this is total uploaded bytes so it includes
// padding and redundancy, so uploadedBytes can return a value much larger than
// the file's original filesize.
func (sf *SiaFile) UploadedBytes() uint64 {
sf.mu.RLock()
defer sf.mu.RUnlock()
uploaded, _ := sf.uploadedBytes()
return uploaded
}
// UpdateUsedHosts updates the 'Used' flag for the entries in the pubKeyTable
// of the SiaFile. The keys of all used hosts should be passed to the method
// and the SiaFile will update the flag for hosts it knows of to 'true' and set
......@@ -774,20 +767,6 @@ func (sf *SiaFile) UpdateUsedHosts(used []types.SiaPublicKey) error {
return sf.createAndApplyTransaction(updates...)
}
// UploadProgress indicates what percentage of the file has been uploaded based
// on the unique pieces that have been uploaded. Note that a file may be
// Available long before UploadProgress reaches 100%.
func (sf *SiaFile) UploadProgress() float64 {
if sf.Size() == 0 {
return 100
}
desired := sf.NumChunks() * modules.SectorSize * uint64(sf.ErasureCode().NumPieces())
sf.mu.RLock()
defer sf.mu.RUnlock()
_, uploaded := sf.uploadedBytes()
return math.Min(100*(float64(uploaded)/float64(desired)), 100)
}
// defragChunk removes pieces which belong to bad hosts and if that wasn't
// enough to reduce the chunkSize below the maximum size, it will remove
// redundant pieces.
......@@ -898,6 +877,23 @@ func (sf *SiaFile) goodPieces(chunkIndex int, offlineMap map[string]bool, goodFo
return numPiecesGoodForRenew, numPiecesGoodForUpload
}
// updateUploadProgressAndBytes updates the CachedUploadProgress and
// CachedUploadedBytes fields to indicate what percentage of the file has been
// uploaded based on the unique pieces that have been uploaded and also how many
// bytes have been uploaded of that file in total. Note that a file may be
// Available long before UploadProgress reaches 100%.
func (sf *SiaFile) updateUploadProgressAndBytes() {
_, uploaded := sf.uploadedBytes()
if sf.staticMetadata.FileSize == 0 {
// Update cache.
sf.staticMetadata.CachedUploadProgress = 100
return
}
desired := sf.NumChunks() * modules.SectorSize * uint64(sf.ErasureCode().NumPieces())
// Update cache.
sf.staticMetadata.CachedUploadProgress = math.Min(100*(float64(uploaded)/float64(desired)), 100)
}
// uploadedBytes indicates how many bytes of the file have been uploaded via
// current file contracts in total as well as unique uploaded bytes. Note that
// this includes padding and redundancy, so uploadedBytes can return a value
......@@ -921,5 +917,7 @@ func (sf *SiaFile) uploadedBytes() (uint64, uint64) {
unique += modules.SectorSize
}
}
// Update cache.
sf.staticMetadata.CachedUploadedBytes = total
return total, unique
}
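The denominator in updateUploadProgressAndBytes is the amount of data the network holds at full redundancy: NumChunks * SectorSize * NumPieces. With assumed numbers, a 2-chunk file, 4 MiB sectors (the production value of modules.SectorSize), and a 30-piece erasure code need 240 MiB for 100%, so 120 MiB of unique uploaded bytes reads as 50%. The same formula as a pure function:

```go
package main

import (
	"fmt"
	"math"
)

// uploadProgress mirrors updateUploadProgressAndBytes above as a pure
// function; the 4 MiB sector size is assumed.
func uploadProgress(uploadedUnique, numChunks, numPieces uint64) float64 {
	const sectorSize = 1 << 22 // 4 MiB
	if numChunks == 0 || numPieces == 0 {
		return 100 // mirrors the zero-size special case above
	}
	desired := numChunks * sectorSize * numPieces
	return math.Min(100*(float64(uploadedUnique)/float64(desired)), 100)
}

func main() {
	// 2 chunks * 4 MiB * 30 pieces = 240 MiB desired; 120 MiB -> 50%.
	fmt.Printf("%.0f%%\n", uploadProgress(120<<20, 2, 30))
}
```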
......@@ -714,9 +714,9 @@ func TestUploadedBytes(t *testing.T) {
}
totalBytes, uniqueBytes := f.uploadedBytes()
if totalBytes != 4*modules.SectorSize {
t.Errorf("expected totalBytes to be %v, got %v", 4*modules.SectorSize, f.UploadedBytes())
t.Errorf("expected totalBytes to be %v, got %v", 4*modules.SectorSize, totalBytes)
}
if uniqueBytes != modules.SectorSize {
t.Errorf("expected uploadedBytes to be %v, got %v", modules.SectorSize, f.UploadedBytes())
t.Errorf("expected uploadedBytes to be %v, got %v", modules.SectorSize, uniqueBytes)
}
}
......@@ -258,6 +258,25 @@ func (sfs *SiaFileSet) open(siaPath modules.SiaPath) (*SiaFileSetEntry, error) {
}, nil
}
// metadata returns the metadata of the SiaFile at siaPath.
func (sfs *SiaFileSet) metadata(siaPath modules.SiaPath) (Metadata, error) {
var entry *siaFileSetEntry
entry, _, exists := sfs.siaPathToEntryAndUID(siaPath)
if exists {
// Get metadata from entry.
return entry.staticMetadata, nil
}
// Try to load the metadata from disk.
md, err := LoadSiaFileMetadata(siaPath.SiaFileSysPath(sfs.siaFileDir))
if os.IsNotExist(err) {
return Metadata{}, ErrUnknownPath
}
if err != nil {
return Metadata{}, err
}
return md, nil
}
// Delete deletes the SiaFileSetEntry's SiaFile
func (sfs *SiaFileSet) Delete(siaPath modules.SiaPath) error {
sfs.mu.Lock()
......@@ -329,6 +348,13 @@ func (sfs *SiaFileSet) Open(siaPath modules.SiaPath) (*SiaFileSetEntry, error) {
return sfs.open(siaPath)
}
// Metadata returns the metadata of a SiaFile.
func (sfs *SiaFileSet) Metadata(siaPath modules.SiaPath) (Metadata, error) {
sfs.mu.Lock()
defer sfs.mu.Unlock()
return sfs.metadata(siaPath)
}
// Rename will move a siafile from one path to a new path. Existing entries that
// are already open at the old path will continue to be valid.
func (sfs *SiaFileSet) Rename(siaPath, newSiaPath modules.SiaPath) error {
......
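The two-step lookup in SiaFileSet.metadata, open entries in memory first and then a lazy read from disk, lets callers fetch file information without keeping a SiaFileSetEntry open. A hedged sketch of a caller; the helper below is hypothetical:

```go
import (
	"gitlab.com/NebulousLabs/Sia/modules"
	"gitlab.com/NebulousLabs/Sia/modules/renter/siafile"
)

// stuckStatus is a hypothetical helper that reports whether the file at
// siaPath has stuck chunks, using only cached metadata. A failed lookup
// surfaces as siafile.ErrUnknownPath when the file is neither open in
// memory nor present on disk.
func stuckStatus(sfs *siafile.SiaFileSet, siaPath modules.SiaPath) (bool, error) {
	md, err := sfs.Metadata(siaPath)
	if err != nil {
		return false, err
	}
	return md.NumStuckChunks > 0, nil
}
```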
(The diff of one additional file is collapsed and not shown.)