Commit 4201e9f0 authored by David Vorick

clean up remaining instances of string(pubkey.key)

parent 200f6dd6
Pipeline #40910149 passed with stages in 28 minutes and 33 seconds
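For context: the cleanup replaces raw-byte map keys with the canonical string rendering of a host's public key. Below is a minimal sketch of the difference, assuming the usual shape of types.SiaPublicKey (an Algorithm specifier plus raw Key bytes); the exact output of String() shown in the comments is illustrative, not authoritative.

package main

import (
	"fmt"

	"gitlab.com/NebulousLabs/Sia/types"
	"gitlab.com/NebulousLabs/fastrand"
)

func main() {
	pk := types.SiaPublicKey{
		Algorithm: types.SignatureEd25519,
		Key:       fastrand.Bytes(32),
	}

	// Before: the raw key bytes coerced into a Go string. Usable as a map
	// key, but unprintable, and it drops the algorithm specifier, so keys
	// of different algorithms with identical bytes would collide.
	before := string(pk.Key)

	// After: the canonical rendering (something like "ed25519:3f9a...").
	// Printable, unambiguous, and consistent with log and API output.
	after := pk.String()

	fmt.Println(len(before), after)
}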
@@ -353,7 +353,7 @@ func (c *Contractor) managedNewContract(host modules.HostDBEntry, contractFundin
// Add a mapping from the contract's id to the public key of the host.
c.mu.Lock()
c.contractIDToPubKey[contract.ID] = contract.HostPublicKey
-_, exists := c.pubKeysToContractID[string(contract.HostPublicKey.Key)]
+_, exists := c.pubKeysToContractID[contract.HostPublicKey.String()]
if exists {
c.mu.Unlock()
txnBuilder.Drop()
@@ -362,7 +362,7 @@ func (c *Contractor) managedNewContract(host modules.HostDBEntry, contractFundin
c.log.Println("WARN: Attempted to form a new contract with a host that we already have a contrat with.")
return contractFunding, modules.RenterContract{}, fmt.Errorf("We already have a contract with host %v", contract.HostPublicKey)
}
-c.pubKeysToContractID[string(contract.HostPublicKey.Key)] = contract.ID
+c.pubKeysToContractID[contract.HostPublicKey.String()] = contract.ID
c.mu.Unlock()
contractValue := contract.RenterFunds
@@ -376,7 +376,7 @@ func (c *Contractor) managedPrunePubkeyMap() {
allContracts := c.staticContracts.ViewAll()
pks := make(map[string]struct{})
for _, c := range allContracts {
-pks[string(c.HostPublicKey.Key)] = struct{}{}
+pks[c.HostPublicKey.String()] = struct{}{}
}
c.mu.Lock()
for pk := range c.pubKeysToContractID {
@@ -490,7 +490,7 @@ func (c *Contractor) managedRenew(sc *proto.SafeContract, contractFunding types.
// modules are only interested in the most recent contract anyway.
c.mu.Lock()
c.contractIDToPubKey[newContract.ID] = newContract.HostPublicKey
-c.pubKeysToContractID[string(newContract.HostPublicKey.Key)] = newContract.ID
+c.pubKeysToContractID[newContract.HostPublicKey.String()] = newContract.ID
c.mu.Unlock()
return newContract, nil
@@ -269,11 +269,11 @@ func NewCustomContractor(cs consensusSet, w wallet, tp transactionPool, hdb host
// Initialize the contractIDToPubKey map
for _, contract := range c.oldContracts {
c.contractIDToPubKey[contract.ID] = contract.HostPublicKey
-c.pubKeysToContractID[string(contract.HostPublicKey.Key)] = contract.ID
+c.pubKeysToContractID[contract.HostPublicKey.String()] = contract.ID
}
for _, contract := range c.staticContracts.ViewAll() {
c.contractIDToPubKey[contract.ID] = contract.HostPublicKey
-c.pubKeysToContractID[string(contract.HostPublicKey.Key)] = contract.ID
+c.pubKeysToContractID[contract.HostPublicKey.String()] = contract.ID
}
// Update the allowance in the hostdb with the one that was loaded from
@@ -36,7 +36,7 @@ func (c *Contractor) managedContractUtility(id types.FileContractID) (modules.Co
// contract.
func (c *Contractor) ContractByPublicKey(pk types.SiaPublicKey) (modules.RenterContract, bool) {
c.mu.RLock()
-id, ok := c.pubKeysToContractID[string(pk.Key)]
+id, ok := c.pubKeysToContractID[pk.String()]
c.mu.RUnlock()
if !ok {
return modules.RenterContract{}, false
@@ -73,7 +73,7 @@ func (c *Contractor) OldContracts() []modules.RenterContract {
// ContractUtility returns the utility fields for the given contract.
func (c *Contractor) ContractUtility(pk types.SiaPublicKey) (modules.ContractUtility, bool) {
c.mu.RLock()
-id, ok := c.pubKeysToContractID[string(pk.Key)]
+id, ok := c.pubKeysToContractID[pk.String()]
c.mu.RUnlock()
if !ok {
return modules.ContractUtility{}, false
@@ -103,7 +103,7 @@ func (hd *hostDownloader) Sector(root crypto.Hash) ([]byte, error) {
// from a host.
func (c *Contractor) Downloader(pk types.SiaPublicKey, cancel <-chan struct{}) (_ Downloader, err error) {
c.mu.RLock()
-id, gotID := c.pubKeysToContractID[string(pk.Key)]
+id, gotID := c.pubKeysToContractID[pk.String()]
cachedDownloader, haveDownloader := c.downloaders[id]
height := c.blockHeight
renewing := c.renewing[id]
@@ -115,7 +115,7 @@ func (he *hostEditor) Upload(data []byte) (_ crypto.Hash, err error) {
// delete sectors on a host.
func (c *Contractor) Editor(pk types.SiaPublicKey, cancel <-chan struct{}) (_ Editor, err error) {
c.mu.RLock()
-id, gotID := c.pubKeysToContractID[string(pk.Key)]
+id, gotID := c.pubKeysToContractID[pk.String()]
cachedEditor, haveEditor := c.editors[id]
height := c.blockHeight
renewing := c.renewing[id]
@@ -408,11 +408,11 @@ func (r *Renter) managedNewDownload(params downloadParams) (*download, error) {
if piece.Chunk >= minChunk && piece.Chunk <= maxChunk {
// Sanity check - the same worker should not have two pieces for
// the same chunk.
-_, exists := chunkMaps[piece.Chunk-minChunk][string(resolvedKey.Key)]
+_, exists := chunkMaps[piece.Chunk-minChunk][resolvedKey.String()]
if exists {
r.log.Println("ERROR: Worker has multiple pieces uploaded for the same chunk.")
}
-chunkMaps[piece.Chunk-minChunk][string(resolvedKey.Key)] = downloadPieceInfo{
+chunkMaps[piece.Chunk-minChunk][resolvedKey.String()] = downloadPieceInfo{
index: piece.Piece,
root: piece.MerkleRoot,
}
@@ -61,7 +61,7 @@ func verifyTree(tree *HostTree, nentries int) error {
if len(entries) == 0 {
return errors.New("no hosts")
}
-selectionMap[string(entries[0].PublicKey.Key)]++
+selectionMap[entries[0].PublicKey.String()]++
}
// See if each host was selected enough times.
@@ -82,7 +82,7 @@ func verifyTree(tree *HostTree, nentries int) error {
randWeight := fastrand.BigIntn(tree.root.weight.Big())
node := tree.root.nodeAtWeight(types.NewCurrency(randWeight))
node.remove()
-delete(tree.hosts, string(node.entry.PublicKey.Key))
+delete(tree.hosts, node.entry.PublicKey.String())
// remove the entry from the hostdb so it won't be selected as a
// repeat
@@ -203,7 +203,7 @@ func TestHostTreeParallel(t *testing.T) {
if err != nil {
t.Error(err)
}
-inserted[string(entry.PublicKey.Key)] = entry
+inserted[entry.PublicKey.String()] = entry
mu.Lock()
nelements++
@@ -219,7 +219,7 @@ func TestHostTreeParallel(t *testing.T) {
if err != nil {
t.Error(err)
}
-delete(inserted, string(entry.PublicKey.Key))
+delete(inserted, entry.PublicKey.String())
mu.Lock()
nelements--
@@ -239,7 +239,7 @@ func TestHostTreeParallel(t *testing.T) {
if err != nil {
t.Error(err)
}
-inserted[string(entry.PublicKey.Key)] = newentry
+inserted[entry.PublicKey.String()] = newentry
// FETCH
case 3:
@@ -287,7 +287,7 @@ func TestHostTreeModify(t *testing.T) {
targetKey := keys[fastrand.Intn(treeSize)]
-oldEntry := tree.hosts[string(targetKey.Key)].entry
+oldEntry := tree.hosts[targetKey.String()].entry
newEntry := makeHostDBEntry()
newEntry.AcceptingContracts = false
newEntry.PublicKey = oldEntry.PublicKey
@@ -297,7 +297,7 @@ func TestHostTreeModify(t *testing.T) {
t.Fatal(err)
}
-if tree.hosts[string(targetKey.Key)].entry.AcceptingContracts {
+if tree.hosts[targetKey.String()].entry.AcceptingContracts {
t.Fatal("modify did not update host entry")
}
}
@@ -335,7 +335,7 @@ func TestVariedWeights(t *testing.T) {
if len(randEntry) == 0 {
t.Fatal("no hosts!")
}
-node, exists := tree.hosts[string(randEntry[0].PublicKey.Key)]
+node, exists := tree.hosts[randEntry[0].PublicKey.String()]
if !exists {
t.Fatal("can't find randomly selected node in tree")
}
@@ -395,7 +395,7 @@ func TestNodeAtWeight(t *testing.T) {
}
h := tree.root.nodeAtWeight(weight)
-if string(h.entry.HostDBEntry.PublicKey.Key) != string(entry.PublicKey.Key) {
+if h.entry.HostDBEntry.PublicKey.String() != entry.PublicKey.String() {
t.Errorf("nodeAtWeight returned wrong node: expected %v, got %v", entry, h.entry)
}
}
@@ -424,7 +424,7 @@ func (cs *ContractSet) managedInsertContract(h contractHeader, roots []crypto.Ha
}
cs.mu.Lock()
cs.contracts[sc.header.ID()] = sc
-cs.pubKeys[string(h.HostPublicKey().Key)] = sc.header.ID()
+cs.pubKeys[h.HostPublicKey().String()] = sc.header.ID()
cs.mu.Unlock()
return sc.Metadata(), nil
}
@@ -495,7 +495,7 @@ func (cs *ContractSet) loadSafeContract(filename string, walTxns []*writeaheadlo
}
}
cs.contracts[sc.header.ID()] = sc
-cs.pubKeys[string(header.HostPublicKey().Key)] = sc.header.ID()
+cs.pubKeys[header.HostPublicKey().String()] = sc.header.ID()
return nil
}
@@ -63,7 +63,7 @@ func (cs *ContractSet) Delete(c *SafeContract) {
return
}
delete(cs.contracts, c.header.ID())
-delete(cs.pubKeys, string(c.header.HostPublicKey().Key))
+delete(cs.pubKeys, c.header.HostPublicKey().String())
cs.mu.Unlock()
c.mu.Unlock()
// delete contract file
@@ -45,7 +45,7 @@ func (w *worker) managedDownload(udc *unfinishedDownloadChunk) {
d.Close()
}()
-pieceData, err := d.Sector(udc.staticChunkMap[string(w.contract.HostPublicKey.Key)].root)
+pieceData, err := d.Sector(udc.staticChunkMap[w.contract.HostPublicKey.String()].root)
if err != nil {
w.renter.log.Debugln("worker failed to download sector:", err)
udc.managedUnregisterWorker(w)
@@ -61,7 +61,7 @@ func (w *worker) managedDownload(udc *unfinishedDownloadChunk) {
// Decrypt the piece. This might introduce some overhead for downloads with
// a large overdrive. It shouldn't be a bottleneck though since bandwidth
// is usually a lot more scarce than CPU processing power.
-pieceIndex := udc.staticChunkMap[string(w.contract.HostPublicKey.Key)].index
+pieceIndex := udc.staticChunkMap[w.contract.HostPublicKey.String()].index
key := deriveKey(udc.masterKey, udc.staticChunkIndex, pieceIndex)
decryptedPiece, err := key.DecryptBytesInPlace(pieceData)
if err != nil {
@@ -157,7 +157,7 @@ func (w *worker) managedQueueDownloadChunk(udc *unfinishedDownloadChunk) {
func (udc *unfinishedDownloadChunk) managedUnregisterWorker(w *worker) {
udc.mu.Lock()
udc.piecesRegistered--
-udc.pieceUsage[udc.staticChunkMap[string(w.contract.HostPublicKey.Key)].index] = false
+udc.pieceUsage[udc.staticChunkMap[w.contract.HostPublicKey.String()].index] = false
udc.mu.Unlock()
}
@@ -184,7 +184,7 @@ func (w *worker) ownedProcessDownloadChunk(udc *unfinishedDownloadChunk) *unfini
udc.mu.Lock()
chunkComplete := udc.piecesCompleted >= udc.erasureCode.MinPieces() || udc.download.staticComplete()
chunkFailed := udc.piecesCompleted+udc.workersRemaining < udc.erasureCode.MinPieces()
-pieceData, workerHasPiece := udc.staticChunkMap[string(w.contract.HostPublicKey.Key)]
+pieceData, workerHasPiece := udc.staticChunkMap[w.contract.HostPublicKey.String()]
pieceCompleted := udc.completedPieces[pieceData.index]
if chunkComplete || chunkFailed || w.ownedOnDownloadCooldown() || !workerHasPiece || pieceCompleted {
udc.mu.Unlock()
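One reason a full sweep matters: these maps (pubKeysToContractID in the contractor, pubKeys in the contract set, staticChunkMap in the renter's download code) are written and read from different files, and a leftover raw-byte writer is invisible to a String()-keyed reader. The failure is a silent lookup miss, not an error. A hedged toy illustration, again assuming the usual types.SiaPublicKey layout:

package main

import (
	"fmt"

	"gitlab.com/NebulousLabs/Sia/types"
)

func main() {
	pk := types.SiaPublicKey{
		Algorithm: types.SignatureEd25519,
		Key:       []byte{1, 2, 3},
	}
	m := make(map[string]types.FileContractID)

	// A writer still keyed by the raw bytes...
	m[string(pk.Key)] = types.FileContractID{}

	// ...is never found by a reader using the canonical form.
	_, ok := m[pk.String()]
	fmt.Println(ok) // false: the entry is effectively lost
}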