Commit 6967d768 authored by Luke Champine

Merge branch 'filter-fix' into 'master'

Filter fix

See merge request !3382
parents 7f951534 4201e9f0
Pipeline #40914054 passed with stages
in 32 minutes and 10 seconds
......@@ -353,7 +353,7 @@ func (c *Contractor) managedNewContract(host modules.HostDBEntry, contractFundin
// Add a mapping from the contract's id to the public key of the host.
c.mu.Lock()
c.contractIDToPubKey[contract.ID] = contract.HostPublicKey
_, exists := c.pubKeysToContractID[string(contract.HostPublicKey.Key)]
_, exists := c.pubKeysToContractID[contract.HostPublicKey.String()]
if exists {
c.mu.Unlock()
txnBuilder.Drop()
......@@ -362,7 +362,7 @@ func (c *Contractor) managedNewContract(host modules.HostDBEntry, contractFundin
c.log.Println("WARN: Attempted to form a new contract with a host that we already have a contrat with.")
return contractFunding, modules.RenterContract{}, fmt.Errorf("We already have a contract with host %v", contract.HostPublicKey)
}
c.pubKeysToContractID[string(contract.HostPublicKey.Key)] = contract.ID
c.pubKeysToContractID[contract.HostPublicKey.String()] = contract.ID
c.mu.Unlock()
contractValue := contract.RenterFunds
......@@ -376,7 +376,7 @@ func (c *Contractor) managedPrunePubkeyMap() {
allContracts := c.staticContracts.ViewAll()
pks := make(map[string]struct{})
for _, c := range allContracts {
pks[string(c.HostPublicKey.Key)] = struct{}{}
pks[c.HostPublicKey.String()] = struct{}{}
}
c.mu.Lock()
for pk := range c.pubKeysToContractID {
......@@ -490,7 +490,7 @@ func (c *Contractor) managedRenew(sc *proto.SafeContract, contractFunding types.
// modules are only interested in the most recent contract anyway.
c.mu.Lock()
c.contractIDToPubKey[newContract.ID] = newContract.HostPublicKey
c.pubKeysToContractID[string(newContract.HostPublicKey.Key)] = newContract.ID
c.pubKeysToContractID[newContract.HostPublicKey.String()] = newContract.ID
c.mu.Unlock()
return newContract, nil
......
......@@ -269,11 +269,11 @@ func NewCustomContractor(cs consensusSet, w wallet, tp transactionPool, hdb host
// Initialize the contractIDToPubKey map
for _, contract := range c.oldContracts {
c.contractIDToPubKey[contract.ID] = contract.HostPublicKey
c.pubKeysToContractID[string(contract.HostPublicKey.Key)] = contract.ID
c.pubKeysToContractID[contract.HostPublicKey.String()] = contract.ID
}
for _, contract := range c.staticContracts.ViewAll() {
c.contractIDToPubKey[contract.ID] = contract.HostPublicKey
c.pubKeysToContractID[string(contract.HostPublicKey.Key)] = contract.ID
c.pubKeysToContractID[contract.HostPublicKey.String()] = contract.ID
}
// Update the allowance in the hostdb with the one that was loaded from
......
......@@ -36,7 +36,7 @@ func (c *Contractor) managedContractUtility(id types.FileContractID) (modules.Co
// contract.
func (c *Contractor) ContractByPublicKey(pk types.SiaPublicKey) (modules.RenterContract, bool) {
c.mu.RLock()
id, ok := c.pubKeysToContractID[string(pk.Key)]
id, ok := c.pubKeysToContractID[pk.String()]
c.mu.RUnlock()
if !ok {
return modules.RenterContract{}, false
......@@ -73,7 +73,7 @@ func (c *Contractor) OldContracts() []modules.RenterContract {
// ContractUtility returns the utility fields for the given contract.
func (c *Contractor) ContractUtility(pk types.SiaPublicKey) (modules.ContractUtility, bool) {
c.mu.RLock()
id, ok := c.pubKeysToContractID[string(pk.Key)]
id, ok := c.pubKeysToContractID[pk.String()]
c.mu.RUnlock()
if !ok {
return modules.ContractUtility{}, false
......
......@@ -103,7 +103,7 @@ func (hd *hostDownloader) Sector(root crypto.Hash) ([]byte, error) {
// from a host.
func (c *Contractor) Downloader(pk types.SiaPublicKey, cancel <-chan struct{}) (_ Downloader, err error) {
c.mu.RLock()
id, gotID := c.pubKeysToContractID[string(pk.Key)]
id, gotID := c.pubKeysToContractID[pk.String()]
cachedDownloader, haveDownloader := c.downloaders[id]
height := c.blockHeight
renewing := c.renewing[id]
......
......@@ -115,7 +115,7 @@ func (he *hostEditor) Upload(data []byte) (_ crypto.Hash, err error) {
// delete sectors on a host.
func (c *Contractor) Editor(pk types.SiaPublicKey, cancel <-chan struct{}) (_ Editor, err error) {
c.mu.RLock()
id, gotID := c.pubKeysToContractID[string(pk.Key)]
id, gotID := c.pubKeysToContractID[pk.String()]
cachedEditor, haveEditor := c.editors[id]
height := c.blockHeight
renewing := c.renewing[id]
......
......@@ -408,11 +408,11 @@ func (r *Renter) managedNewDownload(params downloadParams) (*download, error) {
if piece.Chunk >= minChunk && piece.Chunk <= maxChunk {
// Sanity check - the same worker should not have two pieces for
// the same chunk.
_, exists := chunkMaps[piece.Chunk-minChunk][string(resolvedKey.Key)]
_, exists := chunkMaps[piece.Chunk-minChunk][resolvedKey.String()]
if exists {
r.log.Println("ERROR: Worker has multiple pieces uploaded for the same chunk.")
}
chunkMaps[piece.Chunk-minChunk][string(resolvedKey.Key)] = downloadPieceInfo{
chunkMaps[piece.Chunk-minChunk][resolvedKey.String()] = downloadPieceInfo{
index: piece.Piece,
root: piece.MerkleRoot,
}
......
......@@ -88,7 +88,7 @@ type HostDB struct {
// insert inserts the HostDBEntry into both hosttrees
func (hdb *HostDB) insert(host modules.HostDBEntry) error {
err := hdb.hostTree.Insert(host)
_, ok := hdb.filteredHosts[string(host.PublicKey.Key)]
_, ok := hdb.filteredHosts[host.PublicKey.String()]
isWhitelist := hdb.filterMode == modules.HostDBActiveWhitelist
if isWhitelist == ok {
errF := hdb.filteredTree.Insert(host)
......@@ -102,7 +102,7 @@ func (hdb *HostDB) insert(host modules.HostDBEntry) error {
// modify modifies the HostDBEntry in both hosttrees
func (hdb *HostDB) modify(host modules.HostDBEntry) error {
err := hdb.hostTree.Modify(host)
_, ok := hdb.filteredHosts[string(host.PublicKey.Key)]
_, ok := hdb.filteredHosts[host.PublicKey.String()]
isWhitelist := hdb.filterMode == modules.HostDBActiveWhitelist
if isWhitelist == ok {
err = errors.Compose(err, hdb.filteredTree.Modify(host))
......@@ -113,7 +113,7 @@ func (hdb *HostDB) modify(host modules.HostDBEntry) error {
// remove removes the HostDBEntry from both hosttrees
func (hdb *HostDB) remove(pk types.SiaPublicKey) error {
err := hdb.hostTree.Remove(pk)
_, ok := hdb.filteredHosts[string(pk.Key)]
_, ok := hdb.filteredHosts[pk.String()]
isWhitelist := hdb.filterMode == modules.HostDBActiveWhitelist
if isWhitelist == ok {
errF := hdb.filteredTree.Remove(pk)
......@@ -374,7 +374,7 @@ func (hdb *HostDB) Host(spk types.SiaPublicKey) (modules.HostDBEntry, bool) {
if !exists {
return host, exists
}
_, ok := filteredHosts[string(spk.Key)]
_, ok := filteredHosts[spk.String()]
host.Filtered = whitelist != ok
hdb.mu.RLock()
updateHostHistoricInteractions(&host, hdb.blockHeight)
......@@ -422,16 +422,16 @@ func (hdb *HostDB) SetFilterMode(fm modules.FilterMode, hosts []types.SiaPublicK
// Create filteredHosts map
filteredHosts := make(map[string]types.SiaPublicKey)
for _, h := range hosts {
if _, ok := filteredHosts[string(h.Key)]; ok {
if _, ok := filteredHosts[h.String()]; ok {
continue
}
filteredHosts[string(h.Key)] = h
filteredHosts[h.String()] = h
}
var allErrs error
allHosts := hdb.hostTree.All()
for _, host := range allHosts {
// Add hosts to filtered tree
_, ok := filteredHosts[string(host.PublicKey.Key)]
_, ok := filteredHosts[host.PublicKey.String()]
if isWhitelist != ok {
continue
}
......@@ -514,7 +514,7 @@ func (hdb *HostDB) RandomHostsWithAllowance(n int, blacklist, addressBlacklist [
isWhitelist := filterType == modules.HostDBActiveWhitelist
for _, host := range allHosts {
// Filter out listed hosts
_, ok := filteredHosts[string(host.PublicKey.Key)]
_, ok := filteredHosts[host.PublicKey.String()]
if isWhitelist != ok {
continue
}
......
......@@ -211,7 +211,7 @@ func TestRandomHosts(t *testing.T) {
nEntries := int(1e3)
for i := 0; i < nEntries; i++ {
entry := makeHostDBEntry()
entries[string(entry.PublicKey.Key)] = entry
entries[entry.PublicKey.String()] = entry
err := hdbt.hdb.filteredTree.Insert(entry)
if err != nil {
t.Error(err)
......@@ -229,15 +229,15 @@ func TestRandomHosts(t *testing.T) {
}
dupCheck := make(map[string]modules.HostDBEntry)
for _, host := range hosts {
_, exists := entries[string(host.PublicKey.Key)]
_, exists := entries[host.PublicKey.String()]
if !exists {
t.Error("hostdb returning host that doesn't exist.")
}
_, exists = dupCheck[string(host.PublicKey.Key)]
_, exists = dupCheck[host.PublicKey.String()]
if exists {
t.Error("RandomHosts returning duplicates")
}
dupCheck[string(host.PublicKey.Key)] = host
dupCheck[host.PublicKey.String()] = host
}
}
......@@ -251,15 +251,15 @@ func TestRandomHosts(t *testing.T) {
t.Fatalf("RandomHosts returned few entries. got %v wanted %v\n", len(hosts), nEntries/2)
}
for _, host := range hosts {
_, exists := entries[string(host.PublicKey.Key)]
_, exists := entries[host.PublicKey.String()]
if !exists {
t.Error("hostdb returning host that doesn't exist.")
}
_, exists = dupCheck1[string(host.PublicKey.Key)]
_, exists = dupCheck1[host.PublicKey.String()]
if exists {
t.Error("RandomHosts returning duplicates")
}
dupCheck1[string(host.PublicKey.Key)] = host
dupCheck1[host.PublicKey.String()] = host
}
// Iterative case. Check that every time you query for random hosts, you
......@@ -275,21 +275,21 @@ func TestRandomHosts(t *testing.T) {
t.Fatalf("RandomHosts returned few entries. got %v wanted %v\n", len(hosts), nEntries/2)
}
for _, host := range hosts {
_, exists := entries[string(host.PublicKey.Key)]
_, exists := entries[host.PublicKey.String()]
if !exists {
t.Error("hostdb returning host that doesn't exist.")
}
_, exists = dupCheck2[string(host.PublicKey.Key)]
_, exists = dupCheck2[host.PublicKey.String()]
if exists {
t.Error("RandomHosts returning duplicates")
}
_, exists = dupCheck1[string(host.PublicKey.Key)]
_, exists = dupCheck1[host.PublicKey.String()]
if exists {
overlap = true
} else {
disjoint = true
}
dupCheck2[string(host.PublicKey.Key)] = host
dupCheck2[host.PublicKey.String()] = host
}
if !overlap || !disjoint {
......@@ -316,7 +316,7 @@ func TestRandomHosts(t *testing.T) {
if len(rand) != 1 {
t.Fatal("wrong number of hosts returned")
}
if string(rand[0].PublicKey.Key) != string(hosts[0].PublicKey.Key) {
if rand[0].PublicKey.String() != hosts[0].PublicKey.String() {
t.Error("exclude list seems to be excluding the wrong hosts.")
}
......@@ -328,7 +328,7 @@ func TestRandomHosts(t *testing.T) {
if len(rand) != 1 {
t.Fatal("wrong number of hosts returned")
}
if string(rand[0].PublicKey.Key) != string(hosts[0].PublicKey.Key) {
if rand[0].PublicKey.String() != hosts[0].PublicKey.String() {
t.Error("exclude list seems to be excluding the wrong hosts.")
}
......@@ -337,7 +337,7 @@ func TestRandomHosts(t *testing.T) {
// map.
includeMap := make(map[string]struct{})
for j := 0; j < 50; j++ {
includeMap[string(hosts[j].PublicKey.Key)] = struct{}{}
includeMap[hosts[j].PublicKey.String()] = struct{}{}
}
exclude = exclude[49:]
......@@ -351,12 +351,12 @@ func TestRandomHosts(t *testing.T) {
t.Error("random hosts is returning the wrong number of hosts")
}
for _, host := range rand {
_, exists := dupCheck[string(host.PublicKey.Key)]
_, exists := dupCheck[host.PublicKey.String()]
if exists {
t.Error("RandomHosts is selecting duplicates")
}
dupCheck[string(host.PublicKey.Key)] = struct{}{}
_, exists = includeMap[string(host.PublicKey.Key)]
dupCheck[host.PublicKey.String()] = struct{}{}
_, exists = includeMap[host.PublicKey.String()]
if !exists {
t.Error("RandomHosts returning excluded hosts")
}
......@@ -372,12 +372,12 @@ func TestRandomHosts(t *testing.T) {
t.Error("random hosts is returning the wrong number of hosts")
}
for _, host := range rand {
_, exists := dupCheck[string(host.PublicKey.Key)]
_, exists := dupCheck[host.PublicKey.String()]
if exists {
t.Error("RandomHosts is selecting duplicates")
}
dupCheck[string(host.PublicKey.Key)] = struct{}{}
_, exists = includeMap[string(host.PublicKey.Key)]
dupCheck[host.PublicKey.String()] = struct{}{}
_, exists = includeMap[host.PublicKey.String()]
if !exists {
t.Error("RandomHosts returning excluded hosts")
}
......@@ -393,12 +393,12 @@ func TestRandomHosts(t *testing.T) {
t.Error("random hosts is returning the wrong number of hosts")
}
for _, host := range rand {
_, exists := dupCheck[string(host.PublicKey.Key)]
_, exists := dupCheck[host.PublicKey.String()]
if exists {
t.Error("RandomHosts is selecting duplicates")
}
dupCheck[string(host.PublicKey.Key)] = struct{}{}
_, exists = includeMap[string(host.PublicKey.Key)]
dupCheck[host.PublicKey.String()] = struct{}{}
_, exists = includeMap[host.PublicKey.String()]
if !exists {
t.Error("RandomHosts returning excluded hosts")
}
......
......@@ -216,12 +216,12 @@ func (ht *HostTree) Remove(pk types.SiaPublicKey) error {
ht.mu.Lock()
defer ht.mu.Unlock()
node, exists := ht.hosts[string(pk.Key)]
node, exists := ht.hosts[pk.String()]
if !exists {
return ErrNoSuchHost
}
node.remove()
delete(ht.hosts, string(pk.Key))
delete(ht.hosts, pk.String())
return nil
}
......@@ -232,7 +232,7 @@ func (ht *HostTree) Modify(hdbe modules.HostDBEntry) error {
ht.mu.Lock()
defer ht.mu.Unlock()
node, exists := ht.hosts[string(hdbe.PublicKey.Key)]
node, exists := ht.hosts[hdbe.PublicKey.String()]
if !exists {
return ErrNoSuchHost
}
......@@ -246,7 +246,7 @@ func (ht *HostTree) Modify(hdbe modules.HostDBEntry) error {
_, node = ht.root.recursiveInsert(entry)
ht.hosts[string(entry.PublicKey.Key)] = node
ht.hosts[entry.PublicKey.String()] = node
return nil
}
......@@ -285,7 +285,7 @@ func (ht *HostTree) Select(spk types.SiaPublicKey) (modules.HostDBEntry, bool) {
ht.mu.Lock()
defer ht.mu.Unlock()
node, exists := ht.hosts[string(spk.Key)]
node, exists := ht.hosts[spk.String()]
if !exists {
return modules.HostDBEntry{}, false
}
......@@ -312,7 +312,7 @@ func (ht *HostTree) SelectRandom(n int, blacklist, addressBlacklist []types.SiaP
// Add the hosts from the addressBlacklist to the filter.
for _, pubkey := range addressBlacklist {
node, exists := ht.hosts[string(pubkey.Key)]
node, exists := ht.hosts[pubkey.String()]
if !exists {
continue
}
......@@ -322,13 +322,13 @@ func (ht *HostTree) SelectRandom(n int, blacklist, addressBlacklist []types.SiaP
// Remove hosts we want to blacklist from the tree but remember them to make
// sure we can insert them later.
for _, pubkey := range blacklist {
node, exists := ht.hosts[string(pubkey.Key)]
node, exists := ht.hosts[pubkey.String()]
if !exists {
continue
}
// Remove the host from the tree.
node.remove()
delete(ht.hosts, string(pubkey.Key))
delete(ht.hosts, pubkey.String())
// Remember the host to insert it again later.
removedEntries = append(removedEntries, node.entry)
......@@ -355,12 +355,12 @@ func (ht *HostTree) SelectRandom(n int, blacklist, addressBlacklist []types.SiaP
removedEntries = append(removedEntries, node.entry)
node.remove()
delete(ht.hosts, string(node.entry.PublicKey.Key))
delete(ht.hosts, node.entry.PublicKey.String())
}
for _, entry := range removedEntries {
_, node := ht.root.recursiveInsert(entry)
ht.hosts[string(entry.PublicKey.Key)] = node
ht.hosts[entry.PublicKey.String()] = node
}
return hosts
......@@ -389,12 +389,12 @@ func (ht *HostTree) insert(hdbe modules.HostDBEntry) error {
weight: ht.weightFn(hdbe).Score(),
}
if _, exists := ht.hosts[string(entry.PublicKey.Key)]; exists {
if _, exists := ht.hosts[entry.PublicKey.String()]; exists {
return ErrHostExists
}
_, node := ht.root.recursiveInsert(entry)
ht.hosts[string(entry.PublicKey.Key)] = node
ht.hosts[entry.PublicKey.String()] = node
return nil
}
......@@ -61,7 +61,7 @@ func verifyTree(tree *HostTree, nentries int) error {
if len(entries) == 0 {
return errors.New("no hosts")
}
selectionMap[string(entries[0].PublicKey.Key)]++
selectionMap[entries[0].PublicKey.String()]++
}
// See if each host was selected enough times.
......@@ -82,7 +82,7 @@ func verifyTree(tree *HostTree, nentries int) error {
randWeight := fastrand.BigIntn(tree.root.weight.Big())
node := tree.root.nodeAtWeight(types.NewCurrency(randWeight))
node.remove()
delete(tree.hosts, string(node.entry.PublicKey.Key))
delete(tree.hosts, node.entry.PublicKey.String())
// remove the entry from the hostdb so it won't be selected as a
// repeat
......@@ -203,7 +203,7 @@ func TestHostTreeParallel(t *testing.T) {
if err != nil {
t.Error(err)
}
inserted[string(entry.PublicKey.Key)] = entry
inserted[entry.PublicKey.String()] = entry
mu.Lock()
nelements++
......@@ -219,7 +219,7 @@ func TestHostTreeParallel(t *testing.T) {
if err != nil {
t.Error(err)
}
delete(inserted, string(entry.PublicKey.Key))
delete(inserted, entry.PublicKey.String())
mu.Lock()
nelements--
......@@ -239,7 +239,7 @@ func TestHostTreeParallel(t *testing.T) {
if err != nil {
t.Error(err)
}
inserted[string(entry.PublicKey.Key)] = newentry
inserted[entry.PublicKey.String()] = newentry
// FETCH
case 3:
......@@ -287,7 +287,7 @@ func TestHostTreeModify(t *testing.T) {
targetKey := keys[fastrand.Intn(treeSize)]
oldEntry := tree.hosts[string(targetKey.Key)].entry
oldEntry := tree.hosts[targetKey.String()].entry
newEntry := makeHostDBEntry()
newEntry.AcceptingContracts = false
newEntry.PublicKey = oldEntry.PublicKey
......@@ -297,7 +297,7 @@ func TestHostTreeModify(t *testing.T) {
t.Fatal(err)
}
if tree.hosts[string(targetKey.Key)].entry.AcceptingContracts {
if tree.hosts[targetKey.String()].entry.AcceptingContracts {
t.Fatal("modify did not update host entry")
}
}
......@@ -335,7 +335,7 @@ func TestVariedWeights(t *testing.T) {
if len(randEntry) == 0 {
t.Fatal("no hosts!")
}
node, exists := tree.hosts[string(randEntry[0].PublicKey.Key)]
node, exists := tree.hosts[randEntry[0].PublicKey.String()]
if !exists {
t.Fatal("can't find randomly selected node in tree")
}
......@@ -395,7 +395,7 @@ func TestNodeAtWeight(t *testing.T) {
}
h := tree.root.nodeAtWeight(weight)
if string(h.entry.HostDBEntry.PublicKey.Key) != string(entry.PublicKey.Key) {
if h.entry.HostDBEntry.PublicKey.String() != entry.PublicKey.String() {
t.Errorf("nodeAtWeight returned wrong node: expected %v, got %v", entry, h.entry)
}
}
......
......@@ -6,6 +6,8 @@ import (
"gitlab.com/NebulousLabs/Sia/modules"
"gitlab.com/NebulousLabs/Sia/types"
"gitlab.com/NebulousLabs/fastrand"
)
// quitAfterLoadDeps will quit startup in newHostDB
......@@ -52,8 +54,8 @@ func TestSaveLoad(t *testing.T) {
host1.FirstSeen = 1
host2.FirstSeen = 2
host3.FirstSeen = 3
host1.PublicKey.Key = []byte("foo")
host2.PublicKey.Key = []byte("bar")
host1.PublicKey.Key = fastrand.Bytes(32)
host2.PublicKey.Key = fastrand.Bytes(32)
host3.PublicKey.Key = []byte("baz")
hdbt.hdb.hostTree.Insert(host1)
hdbt.hdb.hostTree.Insert(host2)
......@@ -61,9 +63,9 @@ func TestSaveLoad(t *testing.T) {
// Manually set listed Hosts and filterMode
filteredHosts := make(map[string]types.SiaPublicKey)
filteredHosts[string(host1.PublicKey.Key)] = host1.PublicKey
filteredHosts[string(host2.PublicKey.Key)] = host2.PublicKey
filteredHosts[string(host3.PublicKey.Key)] = host3.PublicKey
filteredHosts[host1.PublicKey.String()] = host1.PublicKey
filteredHosts[host2.PublicKey.String()] = host2.PublicKey
filteredHosts[host3.PublicKey.String()] = host3.PublicKey
filterMode := modules.HostDBActiveWhitelist
// Save, close, and reload.
......@@ -120,13 +122,13 @@ func TestSaveLoad(t *testing.T) {
if hdbt.hdb.filterMode != modules.HostDBActiveWhitelist {
t.Error("filter mode should be whitelist")
}
if _, ok := hdbt.hdb.filteredHosts[string(host1.PublicKey.Key)]; !ok {
if _, ok := hdbt.hdb.filteredHosts[host1.PublicKey.String()]; !ok {
t.Error("host1 not found in filteredHosts")
}
if _, ok := hdbt.hdb.filteredHosts[string(host2.PublicKey.Key)]; !ok {
if _, ok := hdbt.hdb.filteredHosts[host2.PublicKey.String()]; !ok {
t.Error("host2 not found in filteredHosts")
}
if _, ok := hdbt.hdb.filteredHosts[string(host3.PublicKey.Key)]; !ok {
if _, ok := hdbt.hdb.filteredHosts[host3.PublicKey.String()]; !ok {
t.Error("host3 not found in filteredHosts")
}
}
......
......@@ -424,7 +424,7 @@ func (cs *ContractSet) managedInsertContract(h contractHeader, roots []crypto.Ha
}
cs.mu.Lock()
cs.contracts[sc.header.ID()] = sc
cs.pubKeys[string(h.HostPublicKey().Key)] = sc.header.ID()
cs.pubKeys[h.HostPublicKey().String()] = sc.header.ID()
cs.mu.Unlock()
return sc.Metadata(), nil
}
......@@ -495,7 +495,7 @@ func (cs *ContractSet) loadSafeContract(filename string, walTxns []*writeaheadlo
}
}
cs.contracts[sc.header.ID()] = sc
cs.pubKeys[string(header.HostPublicKey().Key)] = sc.header.ID()
cs.pubKeys[header.HostPublicKey().String()] = sc.header.ID()
return nil
}
......
......@@ -63,7 +63,7 @@ func (cs *ContractSet) Delete(c *SafeContract) {
return
}
delete(cs.contracts, c.header.ID())
delete(cs.pubKeys, string(c.header.HostPublicKey().Key))
delete(cs.pubKeys, c.header.HostPublicKey().String())
cs.mu.Unlock()
c.mu.Unlock()
// delete contract file
......
......@@ -45,7 +45,7 @@ func (w *worker) managedDownload(udc *unfinishedDownloadChunk) {
d.Close()
}()
pieceData, err := d.Sector(udc.staticChunkMap[string(w.contract.HostPublicKey.Key)].root)
pieceData, err := d.Sector(udc.staticChunkMap[w.contract.HostPublicKey.String()].root)
if err != nil {
w.renter.log.Debugln("worker failed to download sector:", err)
udc.managedUnregisterWorker(w)
......@@ -61,7 +61,7 @@ func (w *worker) managedDownload(udc *unfinishedDownloadChunk) {
// Decrypt the piece. This might introduce some overhead for downloads with
// a large overdrive. It shouldn't be a bottleneck though since bandwidth
// is usually a lot more scarce than CPU processing power.
pieceIndex := udc.staticChunkMap[string(w.contract.HostPublicKey.Key)].index
pieceIndex := udc.staticChunkMap[w.contract.HostPublicKey.String()].index
key := deriveKey(udc.masterKey, udc.staticChunkIndex, pieceIndex)
decryptedPiece, err := key.DecryptBytesInPlace(pieceData)
if err != nil {
......@@ -157,7 +157,7 @@ func (w *worker) managedQueueDownloadChunk(udc *unfinishedDownloadChunk) {
func (udc *unfinishedDownloadChunk) managedUnregisterWorker(w *worker) {
udc.mu.Lock()
udc.piecesRegistered--
udc.pieceUsage[udc.staticChunkMap[string(w.contract.HostPublicKey.Key)].index] = false
udc.pieceUsage[udc.staticChunkMap[w.contract.HostPublicKey.String()].index] = false
udc.mu.Unlock()
}
......@@ -184,7 +184,7 @@ func (w *worker) ownedProcessDownloadChunk(udc *unfinishedDownloadChunk) *unfini
udc.mu.Lock()
chunkComplete := udc.piecesCompleted >= udc.erasureCode.MinPieces() || udc.download.staticComplete()
chunkFailed := udc.piecesCompleted+udc.workersRemaining < udc.erasureCode.MinPieces()
pieceData, workerHasPiece := udc.staticChunkMap[string(w.contract.HostPublicKey.Key)]
pieceData, workerHasPiece := udc.staticChunkMap[w.contract.HostPublicKey.String()]
pieceCompleted := udc.completedPieces[pieceData.index]
if chunkComplete || chunkFailed || w.ownedOnDownloadCooldown() || !workerHasPiece || pieceCompleted {
udc.mu.Unlock()
......
......@@ -434,6 +434,10 @@ func TestUnspentOutputs(t *testing.T) {
// TestFileContractUnspentOutputs tests that outputs created from file
// contracts are properly handled by the wallet.
func TestFileContractUnspentOutputs(t *testing.T) {
if testing.Short() {
t.SkipNow()
}
gp := siatest.GroupParams{
Hosts: 1,
Renters: 1,
......
Markdown is supported
0% or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment