ethash: split into light and full client

parent 7c307266
Pipeline #17927187 passed with stages
in 11 minutes and 8 seconds
......@@ -15,46 +15,65 @@ import (
var EthashEpochLength uint64 = 30000
type Ethash struct {
Cache []byte
type Light struct {
Cache []byte
DataSize int
seed C.ethash_h256_t
light C.ethash_light_t
}
type Full struct {
Light *Light
DAG []byte
light C.ethash_light_t
full C.ethash_full_t
}
func NewEthash(seedhash []byte) (*Ethash, error) {
func NewLight(seedhash []byte) (*Light, error) {
blockNumber, err := seedHashToBlockNum(seedhash)
if err != nil {
return nil, err
}
sh := hashToH256(seedhash)
seed := hashToH256(seedhash)
light := C.ethash_light_new_internal(C.ethash_get_cachesize(C.uint64_t(blockNumber)), &sh)
light := C.ethash_light_new_internal(C.ethash_get_cachesize(C.uint64_t(blockNumber)), &seed)
light.block_number = C.uint64_t(blockNumber)
cache := C.GoBytes(unsafe.Pointer(light.cache), C.int(light.cache_size))
datasize := int(C.ethash_get_datasize(light.block_number))
return &Light{
cache,
datasize,
seed,
light,
}, nil
}
func NewFull(light *Light) (*Full, error) {
dir := make([]byte, 256)
if !C.ethash_get_default_dirname((*C.char)(unsafe.Pointer(&dir[0])), 256) {
return nil, fmt.Errorf("failed to determine ethash dag storage directory")
}
cache := C.GoBytes(unsafe.Pointer(light.cache), C.int(light.cache_size))
fullsize := C.ethash_get_datasize(light.block_number)
full := C.ethash_full_new_internal((*C.char)(unsafe.Pointer(&dir[0])), sh, fullsize, light, nil)
fullsize := C.ethash_get_datasize(light.light.block_number)
full := C.ethash_full_new_internal((*C.char)(unsafe.Pointer(&dir[0])), light.seed, fullsize, light.light, nil)
dag := C.GoBytes(unsafe.Pointer(C.ethash_full_dag(full)), C.int(C.ethash_full_dag_size(full)/4))
return &Ethash{
cache,
dag,
return &Full{
light,
dag,
full,
}, nil
}
func (e *Ethash) Release() {
func (e *Light) Release() {
C.ethash_light_delete(e.light)
}
func (e *Full) Release() {
C.ethash_full_delete(e.full)
}
......
......@@ -49,8 +49,8 @@ func DiffToTarget(diff float32) *big.Int {
return new(big.Int).SetBytes(t)
}
func (work *Work) Verify(hash *Ethash, nonce uint64) (bool, error) {
ret := C.ethash_full_compute(hash.full, hashToH256(work.Header), C.uint64_t(nonce))
func (work *Work) Verify(full *Full, nonce uint64) (bool, error) {
ret := C.ethash_full_compute(full.full, hashToH256(work.Header), C.uint64_t(nonce))
success, result := bool(ret.success), h256ToHash(ret.result)
if !success {
return false, fmt.Errorf("ethash compute failed")
......@@ -60,8 +60,8 @@ func (work *Work) Verify(hash *Ethash, nonce uint64) (bool, error) {
return false, nil
}
func (work *Work) VerifySend(hash *Ethash, nonce uint64, results chan<- Share) (bool, error) {
if ok, err := work.Verify(hash, work.ExtraNonce+nonce); ok {
func (work *Work) VerifySend(full *Full, nonce uint64, results chan<- Share) (bool, error) {
if ok, err := work.Verify(full, work.ExtraNonce+nonce); ok {
results <- Share{
JobId: work.JobId,
Nonce: nonce,
......@@ -72,10 +72,10 @@ func (work *Work) VerifySend(hash *Ethash, nonce uint64, results chan<- Share) (
}
}
func (work *Work) VerifyRange(hash *Ethash, start uint64, size uint64, results chan<- Share) error {
func (work *Work) VerifyRange(full *Full, start uint64, size uint64, results chan<- Share) error {
end := start + size
for i := start; i < end; i++ {
if _, err := work.VerifySend(hash, i, results); err != nil {
if _, err := work.VerifySend(full, i, results); err != nil {
return err
}
}
......
......@@ -55,7 +55,17 @@ func (worker *Ethash) Start() error {
defer close(workChannels[index])
}
var hash *ethash.Ethash
var light *ethash.Light
var full *ethash.Full
defer func() {
if light != nil {
light.Release()
}
if full != nil {
full.Release()
}
}()
var seedhash string
for work := range worker.Work {
......@@ -71,28 +81,40 @@ func (worker *Ethash) Start() error {
for i := 0; i < totalThreads; i++ {
close(workChannels[i])
workChannels[i] = make(chan *ethash.Work, 1)
if hash != nil {
hash.Release()
if light != nil {
light.Release()
light = nil
}
if full != nil {
full.Release()
full = nil
}
}
log.Info("DAG is being initialized, this may take a while")
hash, err = ethash.NewEthash(seedhashBytes)
light, err = ethash.NewLight(seedhashBytes)
if err != nil {
return err
}
log.Info("DAG initialized")
if len(worker.config.Processors) > 0 {
log.Info("DAG is being initialized, this may take a while")
full, err = ethash.NewFull(light)
if err != nil {
return err
}
log.Info("DAG initialized")
}
for cpuIndex, conf := range worker.config.Processors {
for i := 0; i < conf.Threads; i++ {
key := []string{"cpu", fmt.Sprintf("%v", cpuIndex), fmt.Sprintf("%v", i)}
go worker.thread(key, hash, workChannels[len(worker.config.CLDevices)+i])
go worker.thread(key, full, workChannels[len(worker.config.CLDevices)+i])
}
}
if len(worker.config.CLDevices) > 0 {
for i, d := range worker.config.CLDevices {
cl, err := newEthashCL(d, hash)
cl, err := newEthashCL(d, light)
if err != nil {
return err
}
......@@ -111,7 +133,7 @@ func (worker *Ethash) Start() error {
return nil
}
func (worker *Ethash) thread(key []string, hash *ethash.Ethash, workChan chan *ethash.Work) {
func (worker *Ethash) thread(key []string, full *ethash.Full, workChan chan *ethash.Work) {
work := <-workChan
var ok bool
......@@ -128,7 +150,7 @@ func (worker *Ethash) thread(key []string, hash *ethash.Ethash, workChan chan *e
default:
start := time.Now()
if err := work.VerifyRange(hash, nonce, stepping, worker.Shares); err != nil {
if err := work.VerifyRange(full, nonce, stepping, worker.Shares); err != nil {
workerError(err)
}
nonce += stepping
......
......@@ -29,7 +29,7 @@ type ethashCL struct {
globalWorkSize int
}
func newEthashCL(config CLDeviceConfig, ethash *ethash.Ethash) (*ethashCL, error) {
func newEthashCL(config CLDeviceConfig, light *ethash.Light) (*ethashCL, error) {
kernel, err := packr.NewBox("../opencl").MustString("ethash.cl")
if err != nil {
return nil, err
......@@ -37,7 +37,7 @@ func newEthashCL(config CLDeviceConfig, ethash *ethash.Ethash) (*ethashCL, error
device := config.Device.CL()
if int(config.Device.CL().GlobalMemSize()) < len(ethash.DAG) {
if int(config.Device.CL().GlobalMemSize()) < light.DataSize {
return nil, fmt.Errorf("GPU has insufficient memory to fit DAG")
}
......@@ -60,15 +60,15 @@ func newEthashCL(config CLDeviceConfig, ethash *ethash.Ethash) (*ethashCL, error
}
// TODO CreateBuffer results in Invalid Host Ptr, might be a bug in the bindings
cache, err := ctx.CreateEmptyBuffer(cl.MemReadOnly, len(ethash.Cache))
cache, err := ctx.CreateEmptyBuffer(cl.MemReadOnly, len(light.Cache))
if err != nil {
return nil, errors.WithStack(err)
}
if _, err := queue.EnqueueWriteBuffer(cache, true, 0, len(ethash.Cache), unsafe.Pointer(&ethash.Cache[0]), nil); err != nil {
if _, err := queue.EnqueueWriteBuffer(cache, true, 0, len(light.Cache), unsafe.Pointer(&light.Cache[0]), nil); err != nil {
return nil, errors.WithStack(err)
}
dag, err := ctx.CreateEmptyBuffer(cl.MemReadOnly, len(ethash.DAG))
dag, err := ctx.CreateEmptyBuffer(cl.MemReadOnly, light.DataSize)
if err != nil {
return nil, errors.WithStack(err)
}
......@@ -86,8 +86,8 @@ func newEthashCL(config CLDeviceConfig, ethash *ethash.Ethash) (*ethashCL, error
options := []string{
fmt.Sprintf("-D%v=%v", "PLATFORM", 0), // TODO 1 for AMD, 2 for NVIDIA
fmt.Sprintf("-D%v=%v", "GROUP_SIZE", workgroupSize),
fmt.Sprintf("-D%v=%v", "DAG_SIZE", len(ethash.DAG)/128),
fmt.Sprintf("-D%v=%v", "LIGHT_SIZE", len(ethash.Cache)/64), // TODO what's the right size?
fmt.Sprintf("-D%v=%v", "DAG_SIZE", light.DataSize/128),
fmt.Sprintf("-D%v=%v", "LIGHT_SIZE", len(light.Cache)/64), // TODO what's the right size?
//fmt.Sprintf("-D%v=%v", "ACCESSES", workgroupSize), TODO??
fmt.Sprintf("-D%v=%v", "MAX_OUTPUTS", 1),
// fmt.Sprintf("-D%v=%v", "PLATFORM", workgroupSize), TODO!!
......@@ -127,7 +127,7 @@ func newEthashCL(config CLDeviceConfig, ethash *ethash.Ethash) (*ethashCL, error
return nil, errors.WithStack(err)
}
work := len(ethash.DAG) / 128
work := light.DataSize / 128
fullRuns := work / globalWorkSize
restWork := work % globalWorkSize
if restWork > 0 {
......
Markdown is supported
0% or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment