package siatest
import (
"fmt"
"math"
"path/filepath"
"reflect"
"sync"
"time"
"gitlab.com/NebulousLabs/errors"
"gitlab.com/scpcorp/ScPrime/build"
"gitlab.com/scpcorp/ScPrime/modules"
"gitlab.com/scpcorp/ScPrime/modules/host/contractmanager"
"gitlab.com/scpcorp/ScPrime/node"
"gitlab.com/scpcorp/ScPrime/node/api/client"
"gitlab.com/scpcorp/ScPrime/persist"
"gitlab.com/scpcorp/ScPrime/types"
)
type (
// GroupParams is a helper struct to make creating TestGroups easier.
GroupParams struct {
Hosts int // number of hosts to create
Renters int // number of renters to create
Miners int // number of miners to create
}
// TestGroup is a group of TestNodes that are funded, synced and ready
// for upload, download and mining depending on their configuration.
TestGroup struct {
nodes map[*TestNode]struct{}
hosts map[*TestNode]struct{}
renters map[*TestNode]struct{}
miners map[*TestNode]struct{}
stopped map[*TestNode]struct{}
dir string
}
)
var (
// DefaultAllowance is the allowance used for the group's renters.
//
// Note: the default allowance needs to be close enough to the hosts' default
// settings that price gouging protection does not kick in.
DefaultAllowance = modules.Allowance{
Funds: types.ScPrimecoinPrecision.Mul64(6),
Hosts: 5,
Period: 50,
RenewWindow: 24,
// testing SectorSize=4KiB (4096 bytes)
ExpectedStorage: modules.SectorSize * 256, //=1MiB in testing
ExpectedUpload: modules.SectorSize * 128, //=512KiB in testing
ExpectedDownload: modules.SectorSize * 128, //=512KiB in testing
ExpectedRedundancy: 5.0,
MaxPeriodChurn: modules.SectorSize * 192,
}
// testGroupBuffer is a buffer channel to control the number of testgroups
// and nodes created at once
testGroupBuffer = NewGroupBuffer(NumberOfParallelGroups)
)
// FundNodes uses the funds of a miner node to fund all the nodes of the group
func FundNodes(miner *TestNode, nodes map[*TestNode]struct{}) error {
// Get the miner's balance
wg, err := miner.WalletGet()
if err != nil {
return errors.AddContext(err, "failed to get miner's balance")
}
// Send txnsPerNode outputs to each node
txnsPerNode := uint64(25)
scos := make([]types.SiacoinOutput, 0, uint64(len(nodes))*txnsPerNode)
funding := wg.ConfirmedSiacoinBalance.Div64(uint64(len(nodes))).Div64(txnsPerNode + 1)
for node := range nodes {
wag, err := node.WalletAddressGet()
if err != nil {
return errors.AddContext(err, "failed to get wallet address")
}
for i := uint64(0); i < txnsPerNode; i++ {
scos = append(scos, types.SiacoinOutput{
Value: funding,
UnlockHash: wag.Address,
})
}
}
// Send the transaction
_, err = miner.WalletSiacoinsMultiPost(scos)
if err != nil {
return errors.AddContext(err, "failed to send funding txn")
}
// Mine the transactions
if err := miner.MineBlock(); err != nil {
return errors.AddContext(err, "failed to mine funding txn")
}
// Make sure every node has at least one confirmed transaction
for node := range nodes {
err := Retry(100, 100*time.Millisecond, func() error {
wtg, err := node.WalletTransactionsGet(0, math.MaxInt32)
if err != nil {
return err
}
if len(wtg.ConfirmedTransactions) == 0 {
return errors.New("confirmed transactions should be greater than 0")
}
return nil
})
if err != nil {
return err
}
}
return nil
}
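// To illustrate the split above (a sketch, not a guarantee about exact
// amounts): with a confirmed miner balance B and N nodes, each output is
// worth B/(N*26), and every node receives 25 of them, so each node ends up
// with roughly 25/26 of an even share while the remainder stays with the
// miner for fees and later funding.
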
// NewGroup creates a group of TestNodes from node params. All the nodes will
// be connected, synced and funded. Host nodes are also announced.
func NewGroup(groupDir string, nodeParams ...node.NodeParams) (*TestGroup, error) {
// Wait until there is an available buffer
<-testGroupBuffer
defer func() {
testGroupBuffer <- struct{}{}
}()
// Create and init group
tg := &TestGroup{
nodes: make(map[*TestNode]struct{}),
hosts: make(map[*TestNode]struct{}),
renters: make(map[*TestNode]struct{}),
miners: make(map[*TestNode]struct{}),
stopped: make(map[*TestNode]struct{}),
dir: groupDir,
}
// Create node and add it to the correct groups
nodes := make([]*TestNode, 0, len(nodeParams))
for _, np := range nodeParams {
node, err := NewCleanNode(np)
if err != nil {
return nil, errors.AddContext(err, "failed to create clean node")
}
// Add node to nodes
tg.nodes[node] = struct{}{}
nodes = append(nodes, node)
// Add node to hosts
if np.Host != nil || np.CreateHost {
tg.hosts[node] = struct{}{}
}
// Add node to renters
if np.Renter != nil || np.CreateRenter {
tg.renters[node] = struct{}{}
}
// Add node to miners
if np.Miner != nil || np.CreateMiner {
tg.miners[node] = struct{}{}
}
}
// Get a miner and mine some blocks to generate coins
if len(tg.miners) == 0 {
return nil, errors.New("cannot fund group without miners")
}
miner := tg.Miners()[0]
renewWindow := types.BlockHeight(DefaultAllowance.RenewWindow)
for i := types.BlockHeight(0); i <= types.MaturityDelay+types.TaxHardforkHeight+renewWindow; i++ {
if err := miner.MineBlock(); err != nil {
return nil, errors.AddContext(err, "failed to mine block for funding")
}
}
// Fully connect nodes
return tg, tg.setupNodes(tg.hosts, tg.nodes, tg.renters)
}
// NewGroupBuffer creates a new buffer channel and fills it
func NewGroupBuffer(size int) chan struct{} {
buffer := make(chan struct{}, size)
for i := 0; i < size; i++ {
buffer <- struct{}{}
}
return buffer
}
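// The returned channel acts as a counting semaphore: NewGroup takes a token
// from testGroupBuffer before building a group and returns it when done, so
// at most NumberOfParallelGroups groups are under construction at any time.
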
// NewGroupFromTemplate will create hosts, renters and miners according to the
// settings in groupParams.
func NewGroupFromTemplate(groupDir string, groupParams GroupParams) (*TestGroup, error) {
var params []node.NodeParams
// Create host params
for i := 0; i < groupParams.Hosts; i++ {
template := node.HostTemplate
template.HostAPIAddr = "localhost:0"
params = append(params, template)
randomNodeDir(groupDir, &params[len(params)-1])
}
// Create renter params
for i := 0; i < groupParams.Renters; i++ {
template := node.RenterTemplate
template.HostAPIAddr = "localhost:0"
params = append(params, template)
randomNodeDir(groupDir, &params[len(params)-1])
}
// Create miner params
for i := 0; i < groupParams.Miners; i++ {
template := node.MinerTemplate
template.HostAPIAddr = "localhost:0"
params = append(params, template)
randomNodeDir(groupDir, &params[len(params)-1])
}
return NewGroup(groupDir, params...)
}
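// Illustrative usage from a test (a sketch; groupDir and the *testing.T named
// t are assumed to come from the surrounding test):
//
//	tg, err := NewGroupFromTemplate(groupDir, GroupParams{Hosts: 2, Renters: 1, Miners: 1})
//	if err != nil {
//		t.Fatal(err)
//	}
//	defer func() {
//		if err := tg.Close(); err != nil {
//			t.Fatal(err)
//		}
//	}()
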
// addStorageFolderToHosts adds a single storage folder to each host.
func addStorageFolderToHosts(hosts map[*TestNode]struct{}) error {
errs := make([]error, len(hosts))
wg := new(sync.WaitGroup)
i := 0
// The following API call is very slow. Using multiple goroutines speeds
// the process up a lot.
for host := range hosts {
wg.Add(1)
go func(i int, host *TestNode) {
// Unless the node params specify a size, default to a storage folder large
// enough to cover the renters' expected storage (DefaultAllowance.ExpectedStorage).
storage := 8 * contractmanager.MinimumSectorsPerStorageFolder * modules.SectorSize
if host.params.HostStorage > 0 {
storage = host.params.HostStorage
}
errs[i] = host.HostStorageFoldersAddPost(host.Dir, storage)
wg.Done()
}(i, host)
i++
}
wg.Wait()
return errors.Compose(errs...)
}
// announceHosts sets each host to accepting contracts and announces it to the group
func announceHosts(hosts map[*TestNode]struct{}) error {
for host := range hosts {
if host.params.SkipHostAnnouncement {
continue
}
if err := host.HostModifySettingPost(client.HostParamAcceptingContracts, true); err != nil {
return errors.AddContext(err, "failed to set host to accepting contracts")
}
if err := host.HostAnnouncePost(); err != nil {
return errors.AddContext(err, "failed to announce host")
}
}
return nil
}
// connectNodes connects two nodes
func connectNodes(nodeA, nodeB *TestNode) error {
err := build.Retry(100, 100*time.Millisecond, func() error {
if err := nodeA.GatewayConnectPost(nodeB.GatewayAddress()); err != nil && err != client.ErrPeerExists {
return errors.AddContext(err, "failed to connect to peer")
}
isPeer1, err1 := nodeA.hasPeer(nodeB)
isPeer2, err2 := nodeB.hasPeer(nodeA)
if err1 != nil || err2 != nil {
return build.ExtendErr("couldn't determine if nodeA and nodeB are connected",
errors.Compose(err1, err2))
}
if isPeer1 && isPeer2 {
return nil
}
return errors.New("nodeA and nodeB are not peers of each other")
})
return err
}
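// connectNodes retries for up to ten seconds (100 attempts, 100ms apart)
// until both gateways report each other as peers, since a successful
// GatewayConnectPost alone does not yet imply both sides see the connection.
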
// fullyConnectNodes takes a list of nodes and connects all their gateways
func fullyConnectNodes(nodes []*TestNode) error {
// Fully connect the nodes
for i, nodeA := range nodes {
for _, nodeB := range nodes[i+1:] {
err := connectNodes(nodeA, nodeB)
if err != nil {
return err
}
}
}
return nil
}
// hostsInRenterDBCheck makes sure that all the renters see all hosts in their
// database.
func hostsInRenterDBCheck(miner *TestNode, renters map[*TestNode]struct{}, hosts map[*TestNode]struct{}) error {
for renter := range renters {
if renter.params.SkipHostDiscovery {
continue
}
for host := range hosts {
if host.params.SkipHostAnnouncement {
continue
}
numRetries := 0
err := Retry(600, 100*time.Millisecond, func() error {
numRetries++
if renter == host {
// We don't care if the renter is also a host.
return nil
}
// Check if the renter has the host in its db.
err := errors.AddContext(renter.KnowsHost(host), "renter doesn't know host")
if err != nil && numRetries%50 == 0 {
return errors.Compose(err, miner.MineBlock())
}
if err != nil {
return err
}
return nil
})
if err != nil {
return build.ExtendErr("not all renters can see all hosts", err)
}
}
}
return nil
}
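// The periodic MineBlock call above (every 50th failed retry) pushes any
// pending host announcements into a new block so that renters which have not
// yet scanned them eventually see the hosts in their hostdb.
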
// mapToSlice converts a map of TestNodes into a slice
func mapToSlice(m map[*TestNode]struct{}) []*TestNode {
tns := make([]*TestNode, 0, len(m))
for tn := range m {
tns = append(tns, tn)
}
return tns
}
// randomNodeDir generates a random directory for the provided node params if
// Dir wasn't set using the provided parentDir and a randomized suffix.
func randomNodeDir(parentDir string, nodeParams *node.NodeParams) {
if nodeParams.Dir != "" {
return
}
nodeDir := ""
if nodeParams.Gateway != nil || nodeParams.CreateGateway {
nodeDir += "g"
}
if nodeParams.ConsensusSet != nil || nodeParams.CreateConsensusSet {
nodeDir += "c"
}
if nodeParams.TransactionPool != nil || nodeParams.CreateTransactionPool {
nodeDir += "t"
}
if nodeParams.Wallet != nil || nodeParams.CreateWallet {
nodeDir += "w"
}
if nodeParams.Renter != nil || nodeParams.CreateRenter {
nodeDir += "r"
}
if nodeParams.Host != nil || nodeParams.CreateHost {
nodeDir += "h"
}
if nodeParams.Miner != nil || nodeParams.CreateMiner {
nodeDir += "m"
}
nodeDir += fmt.Sprintf("-%s", persist.RandomSuffix())
nodeParams.Dir = filepath.Join(parentDir, nodeDir)
}
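// For example, a node that enables the gateway, consensus set, transaction
// pool, wallet and host modules ends up in a directory named roughly
// "gctwh-<randomsuffix>" under parentDir.
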
// setRenterAllowances sets the allowance of each renter
func setRenterAllowances(renters map[*TestNode]struct{}) error {
for renter := range renters {
// Set allowance
if renter.params.SkipSetAllowance {
continue
}
allowance := DefaultAllowance
if !reflect.DeepEqual(renter.params.Allowance, modules.Allowance{}) {
allowance = renter.params.Allowance
}
if err := renter.RenterPostAllowance(allowance); err != nil {
return err
}
}
return nil
}
// synchronizationCheck makes sure that all the nodes are synced and follow the
// node with the longest chain.
func synchronizationCheck(nodes map[*TestNode]struct{}) error {
// Get node with longest chain.
var longestChainNode *TestNode
var longestChain types.BlockHeight
for n := range nodes {
ncg, err := n.ConsensusGet()
if err != nil {
return err
}
if ncg.Height > longestChain {
longestChain = ncg.Height
longestChainNode = n
}
}
lcg, err := longestChainNode.ConsensusGet()
if err != nil {
return err
}
// Loop until all the blocks have the same CurrentBlock.
for n := range nodes {
err := Retry(600, 100*time.Millisecond, func() error {
ncg, err := n.ConsensusGet()
if err != nil {
return err
}
ngg, err := n.GatewayGet()
if err != nil {
return err
}
// If the CurrentBlocks match we are done.
if lcg.CurrentBlock == ncg.CurrentBlock {
return nil
}
// If the heights don't match, the node is still catching up to the
// longest chain and needs more time to sync.
if lcg.Height != ncg.Height {
return fmt.Errorf("blockHeight doesn't match, %v vs %v (%v peers)", lcg.Height, ncg.Height, len(ngg.Peers))
}
// If the heights match but the block IDs don't, the node is on a
// different fork and needs more time to reorg.
if lcg.CurrentBlock != ncg.CurrentBlock {
return errors.New("ids don't match")
}
return nil
})
if err != nil {
return err
}
}
return nil
}
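// The check above first finds the node with the longest chain and then waits
// (up to 600 retries of 100ms, i.e. about a minute per node) for every other
// node to report the same current block ID.
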
// waitForContracts waits until the renters have formed contracts with the
// hosts in the group.
func waitForContracts(miner *TestNode, renters map[*TestNode]struct{}, hosts map[*TestNode]struct{}) error {
// Create a map for easier public key lookups.
hostMap := make(map[string]struct{})
for host := range hosts {
pk, err := host.HostPublicKey()
if err != nil {
return build.ExtendErr("failed to build hostMap", err)
}
hostMap[string(pk.Key)] = struct{}{}
}
// Each renter is supposed to have at least expectedContracts contracts with
// hosts from the hostMap.
for renter := range renters {
if renter.params.SkipSetAllowance {
continue
}
numRetries := 0
// Get expected number of contracts for this renter.
rg, err := renter.RenterGet()
if err != nil {
return err
}
// If there are fewer hosts in the group than the allowance asks for, we
// need to adjust our expectations.
expectedContracts := rg.Settings.Allowance.Hosts
if uint64(len(hosts)) < expectedContracts {
expectedContracts = uint64(len(hosts))
}
// Subtract hosts which the renter doesn't know yet because they
// weren't announced automatically.
for host := range hosts {
if host.params.SkipHostAnnouncement && renter.KnowsHost(host) != nil {
expectedContracts--
}
}
// Check if number of contracts is sufficient.
err = Retry(1000, 100*time.Millisecond, func() error {
numRetries++
contracts := uint64(0)
// Get the renter's contracts.
rc, err := renter.RenterInactiveContractsGet()
if err != nil {
return err
}
// Count number of contracts
for _, c := range rc.ActiveContracts {
if _, exists := hostMap[string(c.HostPublicKey.Key)]; exists {
contracts++
}
}
for _, c := range rc.InactiveContracts {
if _, exists := hostMap[string(c.HostPublicKey.Key)]; exists {
contracts++
}
}
// Check if number is sufficient
if contracts < expectedContracts {
if numRetries%100 == 0 {
if err := miner.MineBlock(); err != nil {
return err
}
}
return fmt.Errorf("renter hasn't formed enough contracts: expected %v got %v",
expectedContracts, contracts)
}
return nil
})
if err != nil {
return err
}
}
// Mine 1 final block to ensure the contracts are mined and show
// up in a block.
return miner.MineBlock()
}
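// As an example of the expectation above: with an allowance of Hosts = 5 but
// only 3 hosts in the group, expectedContracts drops to 3, and if one of
// those hosts skipped its announcement and is unknown to the renter, only 2
// contracts are required before the retry loop succeeds.
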
// AddNodeN adds n nodes of a given template to the group.
func (tg *TestGroup) AddNodeN(np node.NodeParams, n int) ([]*TestNode, error) {
nps := make([]node.NodeParams, n)
for i := 0; i < n; i++ {
np.HostAPIAddr = "localhost:0"
nps[i] = np
}
return tg.AddNodes(nps...)
}
// AddNodes creates nodes from the provided node params and adds them to the group.
func (tg *TestGroup) AddNodes(nps ...node.NodeParams) ([]*TestNode, error) {
newNodes := make(map[*TestNode]struct{})
newHosts := make(map[*TestNode]struct{})
newRenters := make(map[*TestNode]struct{})
newMiners := make(map[*TestNode]struct{})
for i := range nps {
np := nps[i]
// Create the nodes and add them to the group.
randomNodeDir(tg.dir, &np)
node, err := NewCleanNode(np)
if err != nil {
return mapToSlice(newNodes), build.ExtendErr("failed to create new clean node", err)
}
// Add node to nodes
tg.nodes[node] = struct{}{}
newNodes[node] = struct{}{}
// Add node to hosts
if np.Host != nil || np.CreateHost {
tg.hosts[node] = struct{}{}
newHosts[node] = struct{}{}
}
// Add node to renters
if np.Renter != nil || np.CreateRenter {
tg.renters[node] = struct{}{}
newRenters[node] = struct{}{}
}
// Add node to miners
if np.Miner != nil || np.CreateMiner {
tg.miners[node] = struct{}{}
newMiners[node] = struct{}{}
}
}
return mapToSlice(newNodes), tg.setupNodes(newHosts, newNodes, newRenters)
}
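// Illustrative usage (a sketch): growing a running group by two more hosts,
// which funds, announces and syncs them before returning:
//
//	newHosts, err := tg.AddNodeN(node.HostTemplate, 2)
//	if err != nil {
//		t.Fatal(err)
//	}
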
// Close closes the group and all its nodes. Closing a node is usually a slow
// process, but we can speed it up a lot by closing each node in a separate
// goroutine.
func (tg *TestGroup) Close() error {
wg := new(sync.WaitGroup)
errs := make([]error, len(tg.nodes))
i := 0
for n := range tg.nodes {
_, ok := tg.stopped[n]
if ok {
// If the node is stopped, it's been closed already. Skipping here
// avoids errors that occur when calling Close() twice on testnodes.
continue
}
wg.Add(1)
go func(i int, n *TestNode) {
errs[i] = n.Close()
wg.Done()
}(i, n)
i++
}
wg.Wait()
return errors.Compose(errs...)
}
// Hosts returns all the hosts of the group. Note that the ordering of nodes in
// the slice returned is not the same across multiple calls to this function.
func (tg *TestGroup) Hosts() []*TestNode {
return mapToSlice(tg.hosts)
}
// Miners returns all the miners of the group. Note that the ordering of nodes in
// the slice returned is not the same across multiple calls to this function.
func (tg *TestGroup) Miners() []*TestNode {
return mapToSlice(tg.miners)
}
// Nodes returns all the nodes of the group. Note that the ordering of nodes in
// the slice returned is not the same across multiple calls to this function.
func (tg *TestGroup) Nodes() []*TestNode {
return mapToSlice(tg.nodes)
}
// RemoveNode removes a node from the group and shuts it down.
func (tg *TestGroup) RemoveNode(tn *TestNode) error {
// Remove node from all data structures.
delete(tg.nodes, tn)
delete(tg.hosts, tn)
delete(tg.renters, tn)
delete(tg.miners, tn)
// Close node.
return tn.StopNode()
}
// Renters returns all the renters of the group. Note that the ordering of nodes in
// the slice returned is not the same across multiple calls to this function.
func (tg *TestGroup) Renters() []*TestNode {
return mapToSlice(tg.renters)
}
// RestartNode stops a node and then starts it again while conducting a few
// checks and guaranteeing that the node is connected to the group afterwards.
func (tg *TestGroup) RestartNode(tn *TestNode) error {
if err := tg.StopNode(tn); err != nil {
return err
}
return tg.StartNode(tn)
}
// SetRenterAllowance finishes the setup for the renter test node
func (tg *TestGroup) SetRenterAllowance(renter *TestNode, allowance modules.Allowance) error {
if _, ok := tg.renters[renter]; !ok {
return errors.New("Can not set allowance for renter not in test group")
}
miner := mapToSlice(tg.miners)[0]
r := make(map[*TestNode]struct{})
r[renter] = struct{}{}
// Set renter allowances
renter.params.SkipSetAllowance = false
if err := setRenterAllowances(r); err != nil {
return build.ExtendErr("failed to set renter allowance", err)
}
// Wait for all the renters to form contracts if they haven't got enough
// contracts already.
if err := waitForContracts(miner, r, tg.hosts); err != nil {
return build.ExtendErr("renters failed to form contracts", err)
}
// Make sure all nodes are synced
if err := synchronizationCheck(tg.nodes); err != nil {
return build.ExtendErr("synchronization check 2 failed", err)
}
return nil
}
// StartNode starts a node from the group that has previously been stopped.
func (tg *TestGroup) StartNode(tn *TestNode) error {
if _, exists := tg.nodes[tn]; !exists {
return errors.New("cannot start node that's not part of the group")
}
err := tn.StartNode()
if err != nil {
return err
}
delete(tg.stopped, tn)
if err := fullyConnectNodes(tg.Nodes()); err != nil {
return err
}
return synchronizationCheck(tg.nodes)
}
// StartNodeCleanDeps starts a node from the group that has previously been
// stopped without its previously assigned dependencies.
func (tg *TestGroup) StartNodeCleanDeps(tn *TestNode) error {
if _, exists := tg.nodes[tn]; !exists {
return errors.New("cannot start node that's not part of the group")
}
err := tn.StartNodeCleanDeps()
if err != nil {
return err
}
if err := fullyConnectNodes(tg.Nodes()); err != nil {
return err
}
return synchronizationCheck(tg.nodes)
}
// StopNode stops a node of a group.
func (tg *TestGroup) StopNode(tn *TestNode) error {
if _, exists := tg.nodes[tn]; !exists {
return errors.New("cannot stop node that's not part of the group")
}
tg.stopped[tn] = struct{}{}
return tn.StopNode()
}
// Sync makes sure that the test group's nodes are synchronized
func (tg *TestGroup) Sync() error {
return synchronizationCheck(tg.nodes)
}
// setupNodes does the setup required for creating a test group
// and for adding nodes to an existing group
func (tg *TestGroup) setupNodes(setHosts, setNodes, setRenters map[*TestNode]struct{}) error {
// Find richest miner.
var miner *TestNode
var balance types.Currency
for m := range tg.miners {
wg, err := m.WalletGet()
if err != nil {
return errors.New("failed to find richest miner")
}
if wg.ConfirmedSiacoinBalance.Cmp(balance) > 0 {
miner = m
balance = wg.ConfirmedSiacoinBalance
}
}
// Get all the nodes.
nodes := mapToSlice(tg.nodes)
if err := fullyConnectNodes(nodes); err != nil {
return build.ExtendErr("failed to fully connect nodes", err)
}
// Make sure the new nodes are synced.
if err := synchronizationCheck(tg.nodes); err != nil {
return build.ExtendErr("synchronization check 1 failed", err)
}
// Fund nodes.
if err := FundNodes(miner, setNodes); err != nil {
return build.ExtendErr("failed to fund new hosts", err)
}
// Add storage to host
if err := addStorageFolderToHosts(setHosts); err != nil {
return build.ExtendErr("failed to add storage to hosts", err)
}
// Announce host
if err := announceHosts(setHosts); err != nil {
return build.ExtendErr("failed to announce hosts", err)
}
// Mine a block to get the announcements confirmed
if err := miner.MineBlock(); err != nil {
return build.ExtendErr("failed to mine host announcements", err)
}
// Block until the hosts show up as active in the renters' hostdbs
if err := hostsInRenterDBCheck(miner, tg.renters, tg.hosts); err != nil {
return build.ExtendErr("renter database check failed", err)
}
// Set renter allowances
if err := setRenterAllowances(setRenters); err != nil {
return build.ExtendErr("failed to set renter allowance", err)
}
// Wait for all the renters to form contracts if they haven't got enough
// contracts already.
if err := waitForContracts(miner, tg.renters, tg.hosts); err != nil {
return build.ExtendErr("renters failed to form contracts", err)
}
// Make sure all nodes are synced
if err := synchronizationCheck(tg.nodes); err != nil {
return build.ExtendErr("synchronization check 2 failed", err)
}
return nil
}