Commit 3ad08b81 authored by David Vorick

Merge branch 'master' into benches

Conflicts:
	sync/trymutex_test.go
parents d0bcd4ed e50f57b1
......@@ -3,6 +3,11 @@ Version History
April 2017:
v1.2.1 (patch release)
- Faster host upgrading
- Fix wallet bugs
- Add siac command to cancel allowance
v1.2.0 (minor release)
- Host overhaul
- Wallet overhaul
......
# [![Sia Logo](http://sia.tech/img/svg/sia-green-logo.svg)](http://sia.tech) v1.2.0 (Blue Moon)
# [![Sia Logo](http://sia.tech/img/svg/sia-green-logo.svg)](http://sia.tech) v1.2.1 (Blue Moon)
[![Build Status](https://travis-ci.org/NebulousLabs/Sia.svg?branch=master)](https://travis-ci.org/NebulousLabs/Sia)
[![GoDoc](https://godoc.org/github.com/NebulousLabs/Sia?status.svg)](https://godoc.org/github.com/NebulousLabs/Sia)
......
......@@ -19,7 +19,7 @@ import (
var (
// recommendedHosts is the number of hosts that the renter will form
// contracts with if the value is not specified explicity in the call to
// contracts with if the value is not specified explicitly in the call to
// SetSettings.
recommendedHosts = build.Select(build.Var{
Standard: uint64(50),
......
......@@ -400,7 +400,7 @@ func createServerTester(name string) (*serverTester, error) {
// createAuthenticatedServerTester creates an authenticated server tester
// object that is ready for testing, including money in the wallet and all
// modules initalized.
// modules initialized.
func createAuthenticatedServerTester(name string, password string) (*serverTester, error) {
// createAuthenticatedServerTester should not get called during short
// tests, as it takes a long time to run.
......
......@@ -833,3 +833,122 @@ func TestWalletReset(t *testing.T) {
t.Error("wallet is not unlocked")
}
}
func TestWalletSiafunds(t *testing.T) {
if testing.Short() {
t.SkipNow()
}
t.Parallel()
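// Derive the wallet's Twofish encryption key by hashing the password, so the
// same password can be supplied to password-based API calls later in the test.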
walletPassword := "testpass"
key := crypto.TwofishKey(crypto.HashObject(walletPassword))
testdir := build.TempDir("api", t.Name())
st, err := assembleServerTester(key, testdir)
if err != nil {
t.Fatal(err)
}
defer st.server.Close()
// mine some money
for i := types.BlockHeight(0); i <= types.MaturityDelay; i++ {
_, err := st.miner.AddBlock()
if err != nil {
t.Fatal(err)
}
}
// record transactions
var wtg WalletTransactionsGET
err = st.getAPI("/wallet/transactions?startheight=0&endheight=100", &wtg)
if err != nil {
t.Fatal(err)
}
numTxns := len(wtg.ConfirmedTransactions)
// load siafunds into the wallet
siagPath, _ := filepath.Abs("../types/siag0of1of1.siakey")
loadSiagValues := url.Values{}
loadSiagValues.Set("keyfiles", siagPath)
loadSiagValues.Set("encryptionpassword", walletPassword)
err = st.stdPostAPI("/wallet/siagkey", loadSiagValues)
if err != nil {
t.Fatal(err)
}
err = st.getAPI("/wallet/transactions?startheight=0&endheight=100", &wtg)
if err != nil {
t.Fatal(err)
}
if len(wtg.ConfirmedTransactions) != numTxns+1 {
t.Errorf("expected %v transactions, got %v", numTxns+1, len(wtg.ConfirmedTransactions))
}
// check balance
var wg WalletGET
err = st.getAPI("/wallet", &wg)
if err != nil {
t.Fatal(err)
}
if wg.SiafundBalance.Cmp64(2000) != 0 {
t.Fatalf("bad siafund balance: expected %v, got %v", 2000, wg.SiafundBalance)
}
// spend the siafunds into the wallet seed
var wag WalletAddressGET
err = st.getAPI("/wallet/address", &wag)
if err != nil {
t.Fatal(err)
}
sendSiafundsValues := url.Values{}
sendSiafundsValues.Set("amount", "2000")
sendSiafundsValues.Set("destination", wag.Address.String())
err = st.stdPostAPI("/wallet/siafunds", sendSiafundsValues)
if err != nil {
t.Fatal(err)
}
// Announce the host and form an allowance with it. This will result in a
// siafund claim.
err = st.announceHost()
if err != nil {
t.Fatal(err)
}
err = st.setHostStorage()
if err != nil {
t.Fatal(err)
}
err = st.acceptContracts()
if err != nil {
t.Fatal(err)
}
// mine a block so that the announcement makes it into the blockchain
_, err = st.miner.AddBlock()
if err != nil {
t.Fatal(err)
}
// form allowance
allowanceValues := url.Values{}
testFunds := "10000000000000000000000000000" // 10k SC
testPeriod := "20"
allowanceValues.Set("funds", testFunds)
allowanceValues.Set("period", testPeriod)
err = st.stdPostAPI("/renter", allowanceValues)
if err != nil {
t.Fatal(err)
}
// mine a block so that the file contract makes it into the blockchain
_, err = st.miner.AddBlock()
if err != nil {
t.Fatal(err)
}
// wallet should now have a claim balance
err = st.getAPI("/wallet", &wg)
if err != nil {
t.Fatal(err)
}
if wg.SiacoinClaimBalance.IsZero() {
t.Fatal("expected non-zero claim balance")
}
}
......@@ -7,7 +7,7 @@ import (
const (
// Version is the current version of siad.
Version = "1.2.0"
Version = "1.2.1"
// MaxEncodedVersionLength is the maximum length of a version string encoded
// with the encode package. 100 is much larger than any version number we send
......
......@@ -101,7 +101,7 @@ hosts
// Duration of contracts formed. Must be nonzero.
period // block height
// Renew window specifies how many blocks before the expriation of the current
// Renew window specifies how many blocks before the expiration of the current
// contracts the renter will wait before renewing the contracts. A smaller
// renew window means that Sia must be run more frequently, but also means
// fewer total transaction fees. Storage spending is not affected by the renew
......
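For orientation, here is a minimal Go sketch of setting an allowance through this endpoint. It assumes a siad API listening on the default localhost:9980 address and the parameter names described in the documentation above (funds, period, renewwindow, hosts); the concrete values are only illustrative, not recommendations.

package main

import (
	"fmt"
	"net/http"
	"net/url"
	"strings"
)

func main() {
	// Allowance parameters: funds are in hastings, period and renewwindow in blocks.
	vals := url.Values{}
	vals.Set("funds", "10000000000000000000000000000") // illustrative: 10 KS
	vals.Set("period", "12096")                        // illustrative: roughly 12 weeks of blocks
	vals.Set("renewwindow", "4032")                    // illustrative: begin renewing ~4 weeks before expiration
	vals.Set("hosts", "50")

	req, err := http.NewRequest("POST", "http://localhost:9980/renter", strings.NewReader(vals.Encode()))
	if err != nil {
		panic(err)
	}
	req.Header.Set("Content-Type", "application/x-www-form-urlencoded")
	// siad's API generally expects a Sia-Agent user agent on incoming requests.
	req.Header.Set("User-Agent", "Sia-Agent")
	resp, err := http.DefaultClient.Do(req)
	if err != nil {
		panic(err)
	}
	defer resp.Body.Close()
	fmt.Println(resp.Status)
}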
......@@ -302,7 +302,7 @@ These siacoins are sent to the same address as the siafunds.
\section{Economics of Sia}
The primary currency of Sia is the siacoin.
The supply of siacoins will increase permanently, and all fresh supply will be given to miners as a block subisdy.
The supply of siacoins will increase permanently, and all fresh supply will be given to miners as a block subsidy.
The first block will have 300,000 coins minted.
This number will decrease by 1 coin per block, until a minimum of 30,000 coins per block is reached.
Following a target of 10 minutes between blocks, the annual growth in supply is:\\
......
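The hunk above is truncated before the growth formula itself. As a reading aid only, the subsidy schedule described by the surrounding text can be written as the following sketch (reconstructed from the stated parameters, not copied from the whitepaper; heights here start at 1 for the first block):

\[
  \mathrm{subsidy}(h) = \max\bigl(300{,}000 - (h - 1),\ 30{,}000\bigr) \text{ siacoins.}
\]

With a 10-minute block target there are about $6 \cdot 24 \cdot 365 = 52{,}560$ blocks per year, so once the 30,000-coin floor is reached the long-run issuance is roughly $52{,}560 \cdot 30{,}000 \approx 1.58 \times 10^{9}$ siacoins per year.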
......@@ -60,7 +60,7 @@ func (cs *ConsensusSet) validateHeaderAndBlock(tx dbTx, b types.Block) error {
// Check that the timestamp is not too far in the past to be acceptable.
minTimestamp := cs.blockRuleHelper.minimumValidChildTimestamp(blockMap, &parent)
return cs.blockValidator.ValidateBlock(b, minTimestamp, parent.ChildTarget, parent.Height+1)
return cs.blockValidator.ValidateBlock(b, minTimestamp, parent.ChildTarget, parent.Height+1, cs.log)
}
// checkHeaderTarget returns true if the header's ID meets the given target.
......
......@@ -7,6 +7,7 @@ import (
"time"
"github.com/NebulousLabs/Sia/modules"
"github.com/NebulousLabs/Sia/persist"
"github.com/NebulousLabs/Sia/types"
)
......@@ -163,7 +164,7 @@ func (brh mockBlockRuleHelper) minimumValidChildTimestamp(blockMap dbBucket, pb
// ValidateBlock stores the parameters it receives and returns the mock error
// defined by mockBlockValidator.err.
func (bv mockBlockValidator) ValidateBlock(b types.Block, minTimestamp types.Timestamp, target types.Target, height types.BlockHeight) error {
func (bv mockBlockValidator) ValidateBlock(b types.Block, minTimestamp types.Timestamp, target types.Target, height types.BlockHeight, log *persist.Logger) error {
validateBlockParamsGot = validateBlockParams{true, b, minTimestamp, target, height}
return bv.err
}
......
......@@ -5,6 +5,7 @@ import (
"errors"
"github.com/NebulousLabs/Sia/modules"
"github.com/NebulousLabs/Sia/persist"
"github.com/NebulousLabs/Sia/types"
)
......@@ -20,7 +21,7 @@ var (
type blockValidator interface {
// ValidateBlock validates a block against a minimum timestamp, a block
// target, and a block height.
ValidateBlock(types.Block, types.Timestamp, types.Target, types.BlockHeight) error
ValidateBlock(types.Block, types.Timestamp, types.Target, types.BlockHeight, *persist.Logger) error
}
// stdBlockValidator is the standard implementation of blockValidator.
......@@ -63,7 +64,7 @@ func checkTarget(b types.Block, target types.Target) bool {
// ValidateBlock validates a block against a minimum timestamp, a block target,
// and a block height. Returns nil if the block is valid and an appropriate
// error otherwise.
func (bv stdBlockValidator) ValidateBlock(b types.Block, minTimestamp types.Timestamp, target types.Target, height types.BlockHeight) error {
func (bv stdBlockValidator) ValidateBlock(b types.Block, minTimestamp types.Timestamp, target types.Target, height types.BlockHeight, log *persist.Logger) error {
// Check that the timestamp is not too far in the past to be acceptable.
if minTimestamp > b.Timestamp {
return errEarlyTimestamp
......@@ -75,7 +76,8 @@ func (bv stdBlockValidator) ValidateBlock(b types.Block, minTimestamp types.Time
}
// Check that the block is below the size limit.
if uint64(len(bv.marshaler.Marshal(b))) > types.BlockSizeLimit {
blockSize := len(bv.marshaler.Marshal(b))
if uint64(blockSize) > types.BlockSizeLimit {
return errLargeBlock
}
......@@ -98,5 +100,9 @@ func (bv stdBlockValidator) ValidateBlock(b types.Block, minTimestamp types.Time
if b.Timestamp > bv.clock.Now()+types.FutureThreshold {
return errFutureTimestamp
}
if log != nil {
log.Debugf("validated block at height %v, block size: %vB", height, blockSize)
}
return nil
}
......@@ -78,7 +78,7 @@ func TestUnitValidateBlock(t *testing.T) {
now: tt.now,
},
}
err := blockValidator.ValidateBlock(b, tt.minTimestamp, types.RootDepth, 0)
err := blockValidator.ValidateBlock(b, tt.minTimestamp, types.RootDepth, 0, nil)
if err != tt.errWant {
t.Errorf("%s: got %v, want %v", tt.msg, err, tt.errWant)
}
......
......@@ -108,7 +108,7 @@ func (cs *ConsensusSet) createConsensusDB(tx *bolt.Tx) error {
UnlockHash: types.UnlockHash{},
})
// Add the genesis block to the block strucutres - checksum must be taken
// Add the genesis block to the block structures - checksum must be taken
// after pushing the genesis block into the path.
pushPath(tx, cs.blockRoot.Block.ID())
if build.DEBUG {
......
......@@ -16,7 +16,7 @@ var (
errDiffsNotGenerated = errors.New("applying diff set before generating errors")
errInvalidSuccessor = errors.New("generating diffs for a block that's an invalid successsor to the current block")
errNegativePoolAdjustment = errors.New("committing a siafund pool diff with a negative adjustment")
errNonApplySiafundPoolDiff = errors.New("commiting a siafund pool diff that doesn't have the 'apply' direction")
errNonApplySiafundPoolDiff = errors.New("committing a siafund pool diff that doesn't have the 'apply' direction")
errRevertSiafundPoolDiffMismatch = errors.New("committing a siafund pool diff with an invalid 'adjusted' field")
errWrongAppliedDiffSet = errors.New("applying a diff set that isn't the current block")
errWrongRevertDiffSet = errors.New("reverting a diff set that isn't the current block")
......
......@@ -184,7 +184,7 @@ func applyFileContractMaintenance(tx *bolt.Tx, pb *processedBlock) {
}
// applyMaintenance applies block-level alterations to the consensus set.
// Maintenance is applied after all of the transcations for the block have been
// Maintenance is applied after all of the transactions for the block have been
// applied.
func applyMaintenance(tx *bolt.Tx, pb *processedBlock) {
applyMinerPayouts(tx, pb)
......
......@@ -120,7 +120,7 @@ func (cs *ConsensusSet) setChildTarget(blockMap *bolt.Bucket, pb *processedBlock
}
// newChild creates a blockNode from a block and adds it to the parent's set of
// children. The new node is also returned. It necessairly modifies the database
// children. The new node is also returned. It necessarily modifies the database
func (cs *ConsensusSet) newChild(tx *bolt.Tx, pb *processedBlock, b types.Block) *processedBlock {
// Create the child node.
childID := b.ID()
......
package consensus
import (
"github.com/NebulousLabs/Sia/build"
"github.com/NebulousLabs/Sia/modules"
"github.com/NebulousLabs/bolt"
......@@ -114,18 +115,23 @@ func (cs *ConsensusSet) readlockUpdateSubscribers(ce changeEntry) {
}
}
// initializeSubscribe will take a subscriber and feed them all of the
// managedInitializeSubscribe will take a subscriber and feed them all of the
// consensus changes that have occurred since the change provided.
//
// As a special case, using an empty id as the start will have all the changes
// sent to the modules starting with the genesis block.
func (cs *ConsensusSet) initializeSubscribe(subscriber modules.ConsensusSetSubscriber, start modules.ConsensusChangeID) error {
return cs.db.View(func(tx *bolt.Tx) error {
// 'exists' and 'entry' are going to be pointed to the first entry that
// has not yet been seen by subscriber.
var exists bool
var entry changeEntry
func (cs *ConsensusSet) managedInitializeSubscribe(subscriber modules.ConsensusSetSubscriber, start modules.ConsensusChangeID) error {
if start == modules.ConsensusChangeRecent {
return nil
}
// 'exists' and 'entry' are going to be pointed to the first entry that
// has not yet been seen by subscriber.
var exists bool
var entry changeEntry
cs.mu.RLock()
err := cs.db.View(func(tx *bolt.Tx) error {
if start == modules.ConsensusChangeBeginning {
// Special case: for modules.ConsensusChangeBeginning, create an
// initial node pointing to the genesis block. The subscriber will
......@@ -133,12 +139,6 @@ func (cs *ConsensusSet) initializeSubscribe(subscriber modules.ConsensusSetSubsc
// the genesis block.
entry = cs.genesisEntry()
exists = true
} else if start == modules.ConsensusChangeRecent {
// Special case: for modules.ConsensusChangeRecent, set up the
// subscriber to start receiving only new blocks, but the
// subscriber does not need to do any catch-up. For this
// implementation, a no-op will have this effect.
return nil
} else {
// The subscriber has provided an existing consensus change.
// Because the subscriber already has this consensus change,
......@@ -155,18 +155,35 @@ func (cs *ConsensusSet) initializeSubscribe(subscriber modules.ConsensusSetSubsc
}
entry, exists = entry.NextEntry(tx)
}
return nil
})
cs.mu.RUnlock()
if err != nil {
return err
}
// Send all remaining consensus changes to the subscriber.
for exists {
cc, err := cs.computeConsensusChange(tx, entry)
if err != nil {
return err
// Send all remaining consensus changes to the subscriber.
for exists {
// Send changes in batches of 100 so that we don't hold the
// lock for too long.
cs.mu.RLock()
err = cs.db.View(func(tx *bolt.Tx) error {
for i := 0; i < 100 && exists; i++ {
cc, err := cs.computeConsensusChange(tx, entry)
if err != nil {
return err
}
subscriber.ProcessConsensusChange(cc)
entry, exists = entry.NextEntry(tx)
}
subscriber.ProcessConsensusChange(cc)
entry, exists = entry.NextEntry(tx)
return nil
})
cs.mu.RUnlock()
if err != nil {
return err
}
return nil
})
}
return nil
}
// ConsensusSetSubscribe adds a subscriber to the list of subscribers, and
......@@ -181,18 +198,23 @@ func (cs *ConsensusSet) ConsensusSetSubscribe(subscriber modules.ConsensusSetSub
return err
}
defer cs.tg.Done()
cs.mu.Lock()
defer cs.mu.Unlock()
// Get the input module caught up to the currenct consnesus set.
cs.subscribers = append(cs.subscribers, subscriber)
err = cs.initializeSubscribe(subscriber, start)
// Get the input module caught up to the current consensus set.
err = cs.managedInitializeSubscribe(subscriber, start)
if err != nil {
// Remove the subscriber from the set of subscribers.
cs.subscribers = cs.subscribers[:len(cs.subscribers)-1]
return err
}
// Only add the module as a subscriber if there was no error.
// Add the module to the list of subscribers.
cs.mu.Lock()
// Check that this subscriber is not already subscribed.
for _, s := range cs.subscribers {
if s == subscriber {
build.Critical("refusing to double-subscribe subscriber")
}
}
cs.subscribers = append(cs.subscribers, subscriber)
cs.mu.Unlock()
return nil
}
......
......@@ -217,6 +217,7 @@ func (cs *ConsensusSet) threadedReceiveBlocks(conn modules.PeerConn) error {
return err
}
finishedChan := make(chan struct{})
defer close(finishedChan)
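// finishedChan is closed when this RPC returns so that the goroutine below can
// exit instead of lingering until shutdown.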
go func() {
select {
case <-cs.tg.StopChan():
......@@ -520,7 +521,7 @@ func (cs *ConsensusSet) managedReceiveBlock(id types.BlockID) modules.RPCFunc {
// outbound peers <= v0.5.1 that are stalled in IBD.
func (cs *ConsensusSet) threadedInitialBlockchainDownload() error {
// The consensus set will not recognize IBD as complete until it has enough
// peers. After the deadline though, it will recognize the blochchain
// peers. After the deadline though, it will recognize the blockchain
// download as complete even with only one peer. This deadline is helpful
// to local-net setups, where a machine will frequently only have one peer
// (and that peer will be another machine on the same local network, but
......
......@@ -490,6 +490,6 @@ func TestGenesisBlockSync(t *testing.T) {
time.Sleep(time.Second * 12)
if len(cst1.gateway.Peers()) == 0 {
t.Error("disconnection occured!")
t.Error("disconnection occurred!")
}
}
......@@ -317,7 +317,7 @@ func validTransaction(tx *bolt.Tx, t types.Transaction) error {
// determine if they are valid. An error is returned IFF they are not a valid
// set in the current consensus set. The size of the transactions and the set
// is not checked. After the transactions have been validated, a consensus
// change is returned detailing the diffs that the transaciton set would have.
// change is returned detailing the diffs that the transactions set would have.
func (cs *ConsensusSet) tryTransactionSet(txns []types.Transaction) (modules.ConsensusChange, error) {
// applyTransaction will apply the diffs from a transaction and store them
// in a block node. diffHolder is the blockNode that tracks the temporary
......@@ -359,7 +359,7 @@ func (cs *ConsensusSet) tryTransactionSet(txns []types.Transaction) (modules.Con
// determine if they are valid. An error is returned IFF they are not a valid
// set in the current consensus set. The size of the transactions and the set
// is not checked. After the transactions have been validated, a consensus
// change is returned detailing the diffs that the transaciton set would have.
// change is returned detailing the diffs that the transactions set would have.
func (cs *ConsensusSet) TryTransactionSet(txns []types.Transaction) (modules.ConsensusChange, error) {
err := cs.tg.Add()
if err != nil {
......
......@@ -26,6 +26,9 @@ const (
// Reject peers < v0.4.0 as the previous version is v0.3.3 which is
// pre-hardfork.
minAcceptableVersion = "0.4.0"
// saveFrequency defines how often the gateway saves its persistence.
saveFrequency = time.Minute * 2
)
var (
......@@ -70,6 +73,15 @@ var (
Testing: 500 * time.Millisecond,
}).(time.Duration)
// peerRPCDelay defines the amount of time waited between each RPC accepted
// from a peer. Without this delay, a peer can force us to spin up thousands
// of goroutines per second.
peerRPCDelay = build.Select(build.Var{
Standard: 3 * time.Second,
Dev: 1 * time.Second,
Testing: 25 * time.Millisecond,
}).(time.Duration)
// pruneNodeListLen defines the number of nodes that the gateway must have
// to be pruning nodes from the node list.
pruneNodeListLen = build.Select(build.Var{
......@@ -177,7 +189,7 @@ var (
// the gateway will abort a connection attempt after this long
dialTimeout = build.Select(build.Var{
Standard: 2 * time.Minute,
Standard: 3 * time.Minute,
Dev: 20 * time.Second,
Testing: 500 * time.Millisecond,
}).(time.Duration)
......@@ -185,8 +197,8 @@ var (
// rpcStdDeadline defines the standard deadline that should be used for all
// incoming RPC calls.
rpcStdDeadline = build.Select(build.Var{
Standard: 10 * time.Minute,
Dev: 5 * time.Minute,
Testing: 90 * time.Second,
Standard: 5 * time.Minute,
Dev: 3 * time.Minute,
Testing: 15 * time.Second,
}).(time.Duration)
)
......@@ -54,7 +54,7 @@ import (
// after they successfully form a connection with the gateway. To limit the
// attacker's ability to add nodes to the nodelist, connections are
// ratelimited. An attacker with lots of IP addresses still has the ability to
// fill up the nodelist, however getting 90% dominance of the nodelist requries
// fill up the nodelist, however getting 90% dominance of the nodelist requires
// forming thousands of connections, which will take hours or days. By that
// time, the attacked node should already have its set of outbound peers,
// limiting the amount of damage that the attacker can do.
......@@ -88,7 +88,7 @@ import (
// of bootstrap nodes. If there is any cross-polination (which an attacker
// could do pretty easily), the gateways will not clean up over time, which
// will degrade the quality of the flood network as the two networks will
// continously flood eachother with irrelevant information. Additionally, there
// continuously flood eachother with irrelevant information. Additionally, there
// is no public key exhcange, so communications cannot be effectively encrypted
// or authenticated. The nodes must have some way to share keys.
//
......@@ -191,8 +191,8 @@ type Gateway struct {
}
// managedSleep will sleep for the given period of time. If the full time
// elapses, 'false' is returned. If the sleep is interrupted for shutdown,
// 'true' is returned.
// elapses, 'true' is returned. If the sleep is interrupted for shutdown,
// 'false' is returned.
func (g *Gateway) managedSleep(t time.Duration) (completed bool) {
select {
case <-time.After(t):
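The hunk above cuts off inside the select statement. As a hedged sketch only, the corrected doc comment implies a body shaped roughly like the following, assuming the gateway's thread group exposes a StopChan the way cs.tg does elsewhere in this diff:

func (g *Gateway) managedSleep(t time.Duration) (completed bool) {
	select {
	case <-time.After(t):
		// The full duration elapsed.
		return true
	case <-g.threads.StopChan():
		// The sleep was interrupted by shutdown.
		return false
	}
}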
......@@ -275,6 +275,17 @@ func New(addr string, bootstrap bool, persistDir string) (*Gateway, error) {
if loadErr := g.load(); loadErr != nil && !os.IsNotExist(loadErr) {
return nil, loadErr
}
// Spawn the thread to periodically save the gateway.
go g.threadedSaveLoop()
// Make sure that the gateway saves after shutdown.
g.threads.AfterStop(func() {
g.mu.Lock()
err = g.saveSync()
g.mu.Unlock()
if err != nil {
g.log.Println("ERROR: Unable to save gateway:", err)
}
})
// Add the bootstrap peers to the node list.
if bootstrap {
......
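The hunk above only shows the go g.threadedSaveLoop() call. A hypothetical sketch of what such a loop might look like, built only from the pieces visible in this diff (saveFrequency, managedSleep, saveSync) rather than from the actual source:

func (g *Gateway) threadedSaveLoop() {
	for {
		// Wait saveFrequency between saves; managedSleep returns false on shutdown.
		if !g.managedSleep(saveFrequency) {
			return
		}
		g.mu.Lock()
		err := g.saveSync()
		g.mu.Unlock()
		if err != nil {
			g.log.Println("ERROR: Unable to save gateway:", err)
		}
	}
}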
......@@ -140,9 +140,9 @@ func (g *Gateway) requestNodes(conn modules.PeerConn) error {
g.log.Printf("WARN: peer '%v' sent the invalid addr '%v'", conn.RPCAddr(), node)
}
}
err := g.save()
err := g.saveSync()
if err != nil {
g.log.Println("WARN: failed to save nodelist after requesting nodes:", err)
g.log.Println("ERROR: unable to save new nodes added to the gateway:", err)
}
g.mu.Unlock()
return nil
......@@ -157,7 +157,7 @@ func (g *Gateway) permanentNodePurger(closeChan chan struct{}) {
for {
// Choose an amount of time to wait before attempting to prune a node.
// Nodes will occasionally go offline for some time, which can even be
// days. We don't want to too aggressivley prune nodes with low-moderate
// days. We don't want to too aggressively prune nodes with low-moderate
// uptime, as they are still useful to the network.
//
// But if there are a lot of nodes, we want to make sure that the node
......@@ -167,7 +167,7 @@ func (g *Gateway) permanentNodePurger(closeChan chan struct{}) {
//
// This value is a ratelimit which tries to keep the nodes list in the
// gateawy healthy. A more complex algorithm might adjust this number
// according to the percentage of prune attemtps that are successful
// according to the percentage of prune attempts that are successful
// (decrease prune frequency if most nodes in the database are online,
// increase prune frequency if more nodes in the database are offline).
waitTime := nodePurgeDelay
......@@ -226,7 +226,6 @@ func (g *Gateway) permanentNodePurger(closeChan chan struct{}) {
if err != nil {
g.mu.Lock()
g.removeNode(node)
g.save()
g.mu.Unlock()
g.log.Debugf("INFO: removing node %q because it could not be reached during a random scan: %v", node, err)
}
......
......@@ -164,7 +164,7 @@ func (g *Gateway) managedAcceptConnOldPeer(conn net.Conn, remoteVersion string)
sess: muxado.Server(conn),
})
g.addNode(addr)
return g.save()
return nil
}
// managedAcceptConnNewPeer accepts connection requests from peers >= v1.0.0.
......@@ -206,7 +206,6 @@ func (g *Gateway) managedAcceptConnNewPeer(conn net.Conn, remoteVersion string)
if err == nil {
g.mu.Lock()
g.addNode(remoteAddr)
g.save()
g.mu.Unlock()
}
}()
......@@ -367,7 +366,12 @@ func (g *Gateway) managedConnectOldPeer(conn net.Conn, remoteVersion string, rem
// about duplicates and we have already validated the address by
// connecting to it.
g.addNode(remoteAddr)
return g.save()
// We want to persist the outbound peers.
err := g.saveSync()
if err != nil {
g.log.Println("ERROR: Unable to save new outbound peer to gateway:", err)
}
return nil
}
// managedConnectNewPeer connects to peers >= v1.0.0. The peer is added as a
......@@ -399,7 +403,12 @@ func (g *Gateway) managedConnectNewPeer(conn net.Conn, remoteVersion string, rem
// about duplicates and we have already validated the address by
// connecting to it.
g.addNode(remoteAddr)
return g.save()
// We want to persist the outbound peers.
err = g.saveSync()
if err != nil {
g.log.Println("ERROR: Unable to save new outbound peer to gateway:", err)
}
return nil
}
// managedConnect establishes a persistent connection to a peer, and adds it to
......
......@@ -102,7 +102,7 @@ func (g *Gateway) permanentPeerManager(closedChan chan struct{}) {
// We need at least some of our outbound peers to be remote peers. If
// we already have reached a certain threshold of outbound peers and
// this peer is a local peer, do not consider it for an outbound peer.
// Sleep breifly to prevent the gateway from hogging the CPU if all
// Sleep briefly to prevent the gateway from hogging the CPU if all
// peers are local.
if numOutboundPeers >= maxLocalOutboundPeers && addr.IsLocal() && build.Release != "testing" {
g.log.Debugln("[PPM] Ignorning selected peer; this peer is local and we already have multiple outbound peers:", addr)
......
......@@ -2,6 +2,7 @@ package gateway
import (
"path/filepath"
"time"
"github.com/NebulousLabs/Sia/modules"
"github.com/NebulousLabs/Sia/persist"
......@@ -33,7 +34,7 @@ func (g *Gateway) persistData() (nodes []modules.NetAddress) {
// load loads the Gateway's persistent data from disk.
func (g *Gateway) load() error {
var nodes []modules.NetAddress
err := persist.LoadFile(persistMetadata, &nodes, filepath.Join(g.persistDir, nodesFile))
err := persist.LoadJSON(persistMetadata, &nodes, filepath.Join(g.persistDir, nodesFile))
if err != nil {
return err
}
......@@ -46,13 +47,34 @@ func (g *Gateway) load() error {
return nil
}
// save stores the Gateway's persistent data on disk.