switch to fastrand

parent dbf760f0
......@@ -8,6 +8,7 @@ dependencies:
# Consensus Dependencies
go get -u github.com/NebulousLabs/demotemutex
go get -u github.com/NebulousLabs/ed25519
go get -u github.com/NebulousLabs/fastrand
go get -u github.com/NebulousLabs/merkletree
go get -u github.com/NebulousLabs/bolt
go get -u github.com/dchest/blake2b
......
package crypto
import (
"crypto/rand"
"hash"
"io"
"math"
"math/big"
"sync"
"unsafe"
)
// A randReader produces random values via repeated hashing. The entropy field
// is the concatenation of an initial seed and a 128-bit counter. Each time
// the entropy is hashed, the counter is incremented.
type randReader struct {
	// entropy is the hash input: a 128-bit counter in the first 16 bytes,
	// followed by a HashSize-byte seed drawn from the system entropy source.
	entropy [16 + HashSize]byte
	// h is the reused hash state; Read resets it before each block.
	h hash.Hash
	// buf is scratch space that receives each hash digest before it is
	// copied out to the caller.
	buf [32]byte
	// mu serializes Read so the reader is safe for concurrent use.
	mu sync.Mutex
}
// Read fills b with random data. It always returns len(b), nil.
func (r *randReader) Read(b []byte) (int, error) {
	r.mu.Lock()
	n := 0
	for n < len(b) {
		// Increment counter. The 128-bit counter is treated as two
		// native-endian uint64 words; the second word absorbs the carry
		// when the first wraps to zero.
		// NOTE(review): assumes unaligned 8-byte access is permitted and
		// that native endianness is acceptable (the counter only needs to
		// be unique per hash, not portable) — confirm for target platforms.
		*(*uint64)(unsafe.Pointer(&r.entropy[0]))++
		if *(*uint64)(unsafe.Pointer(&r.entropy[0])) == 0 {
			*(*uint64)(unsafe.Pointer(&r.entropy[8]))++
		}
		// Hash the counter + initial seed.
		r.h.Reset()
		r.h.Write(r.entropy[:])
		// Sum appends into buf[:0] (cap 32), so the digest lands in buf
		// without allocating.
		r.h.Sum(r.buf[:0])
		// Fill out 'b'. The final copy may use only part of buf.
		n += copy(b[n:], r.buf[:])
	}
	r.mu.Unlock()
	return n, nil
}
"github.com/NebulousLabs/fastrand"
)
// Reader is a global, shared instance of a cryptographically strong pseudo-
// random generator. Reader is safe for concurrent use by multiple goroutines.
var Reader = func() *randReader {
	r := &randReader{h: NewHash()}
	// Use 64 bytes in case the first 32 aren't completely random.
	_, err := io.CopyN(r.h, rand.Reader, 64)
	if err != nil {
		panic("crypto: no entropy available")
	}
	// Append the seed digest in place after the 16-byte counter. Sum appends
	// to its argument: entropy[:16] has len 16 and cap 16+HashSize (it slices
	// the backing array), so the digest is written directly into
	// entropy[16:]. The previous Sum(r.entropy[16:]) appended past that
	// slice's capacity, forcing a reallocation and leaving the seed bytes in
	// entropy all zero — i.e. a deterministic generator.
	r.h.Sum(r.entropy[:16])
	return r
}()
// Reader is a global, shared instance of a cryptographically strong pseudo-
// random generator, provided by the fastrand package. It is safe for
// concurrent use by multiple goroutines.
var Reader = fastrand.Reader
// Read is a helper function that calls Reader.Read on b. It always fills b
// completely.
func Read(b []byte) {
	// Reader.Read is documented to always return len(b), nil, so the
	// results are deliberately discarded.
	_, _ = Reader.Read(b)
}
// Read is a helper function that fills b completely with random data by
// delegating to fastrand.Read.
func Read(b []byte) { fastrand.Read(b) }
// RandBytes is a helper function that returns n bytes of random data.
func RandBytes(n int) []byte {
	buf := make([]byte, n)
	Read(buf)
	return buf
}
// RandBytes is a helper function that returns n bytes of random data,
// delegating to fastrand.Bytes.
func RandBytes(n int) []byte { return fastrand.Bytes(n) }
// RandIntn returns a uniform random value in [0,n). It panics if n <= 0.
func RandIntn(n int) int {
	if n <= 0 {
		panic("crypto: argument to Intn is <= 0")
	}
	// To eliminate modulo bias, keep selecting at random until we fall within
	// a range that is evenly divisible by n.
	// NOTE: since n is at most math.MaxUint64/2, max is minimized when:
	//  n = math.MaxUint64/4 + 1 -> max = math.MaxUint64 - math.MaxUint64/4
	// This gives an expected 1.333 tries before choosing a value < max.
	max := math.MaxUint64 - math.MaxUint64%uint64(n)
	b := RandBytes(8)
	// Reinterpret the 8 random bytes as a native-endian uint64.
	r := *(*uint64)(unsafe.Pointer(&b[0]))
	for r >= max {
		Read(b)
		r = *(*uint64)(unsafe.Pointer(&b[0]))
	}
	// Removed: an unreachable duplicate `return fastrand.Intn(n)` that
	// followed this return (dead code left over from a merge).
	return int(r % uint64(n))
}
// RandBigIntn returns a uniform random value in [0,n). It panics if n <= 0.
func RandBigIntn(n *big.Int) *big.Int {
	// rand.Int only fails when its reader fails; the global Reader is
	// documented to always succeed, so the error is deliberately ignored.
	result, _ := rand.Int(Reader, n)
	return result
}
// RandBigIntn returns a uniform random value in [0,n), delegating to
// fastrand.BigIntn. It panics if n <= 0.
func RandBigIntn(n *big.Int) *big.Int { return fastrand.BigIntn(n) }
// Perm returns a random permutation of the integers [0,n).
func Perm(n int) []int {
	// Inside-out Fisher-Yates: value i goes to a random slot j among the
	// first i+1 positions, displacing the previous occupant to slot i.
	// Index 0 needs no draw, so the loop starts at 1.
	perm := make([]int, n)
	for i := 1; i < n; i++ {
		j := RandIntn(i + 1)
		perm[i] = perm[j]
		perm[j] = i
	}
	return perm
}
// Perm returns a random permutation of the integers [0,n), delegating to
// fastrand.Perm.
func Perm(n int) []int { return fastrand.Perm(n) }
package crypto
import (
"crypto/rand"
"math"
"testing"
"time"
)
// BenchmarkRandIntn benchmarks the RandIntn function for small ints.
func BenchmarkRandIntn(b *testing.B) {
	for iter := b.N; iter > 0; iter-- {
		_ = RandIntn(4e3)
	}
}
// BenchmarkRandIntnLarge benchmarks the RandIntn function for large ints.
func BenchmarkRandIntnLarge(b *testing.B) {
	// constant chosen to trigger resampling (see RandIntn)
	const large = math.MaxUint64/4 + 1
	for iter := b.N; iter > 0; iter-- {
		_ = RandIntn(large)
	}
}
// BenchmarkRead32 benchmarks the speed of Read for small slices.
func BenchmarkRead32(b *testing.B) {
	b.SetBytes(32)
	data := make([]byte, 32)
	for iter := 0; iter < b.N; iter++ {
		Read(data)
	}
}
// BenchmarkRead64K benchmarks the speed of Read for larger slices.
func BenchmarkRead64K(b *testing.B) {
	b.SetBytes(64e3)
	data := make([]byte, 64e3)
	for iter := 0; iter < b.N; iter++ {
		Read(data)
	}
}
// BenchmarkReadContention benchmarks the speed of Read when 4 other
// goroutines are calling RandIntn in a tight loop.
func BenchmarkReadContention(b *testing.B) {
	b.SetBytes(32)
	// Spawn contending goroutines. They have no stop signal and run until
	// the test binary exits, which is acceptable for a benchmark.
	for g := 0; g < 4; g++ {
		go func() {
			for {
				RandIntn(1)
				time.Sleep(time.Microsecond)
			}
		}()
	}
	data := make([]byte, 32)
	b.ResetTimer()
	for iter := 0; iter < b.N; iter++ {
		Read(data)
	}
}
// BenchmarkReadCrypto32 benchmarks the speed of (crypto/rand).Read for small
// slices. This establishes a lower limit for BenchmarkRead32.
func BenchmarkReadCrypto32(b *testing.B) {
	b.SetBytes(32)
	data := make([]byte, 32)
	for iter := 0; iter < b.N; iter++ {
		rand.Read(data)
	}
}
// BenchmarkReadCrypto64K benchmarks the speed of (crypto/rand).Read for larger
// slices. This establishes a lower limit for BenchmarkRead64K.
func BenchmarkReadCrypto64K(b *testing.B) {
	b.SetBytes(64e3)
	data := make([]byte, 64e3)
	for iter := 0; iter < b.N; iter++ {
		rand.Read(data)
	}
}
// BenchmarkPerm32 benchmarks the speed of Perm for small slices.
func BenchmarkPerm32(b *testing.B) {
	for iter := b.N; iter > 0; iter-- {
		Perm(32)
	}
}
// BenchmarkPermLarge4k benchmarks the speed of Perm for large slices.
func BenchmarkPermLarge4k(b *testing.B) {
	for iter := b.N; iter > 0; iter-- {
		Perm(4e3)
	}
}
package crypto
import (
"bytes"
"compress/gzip"
"testing"
)
// panics reports whether calling fn resulted in a panic.
func panics(fn func()) (panicked bool) {
	// recover only intercepts a panic when called from a deferred function;
	// panicked stays false on a normal return.
	defer func() {
		if recover() != nil {
			panicked = true
		}
	}()
	fn()
	return
}
// TestRandIntnPanics tests that RandIntn panics if n <= 0.
func TestRandIntnPanics(t *testing.T) {
	// Both the zero and negative cases must panic.
	for _, n := range []int{0, -1} {
		n := n
		if !panics(func() { RandIntn(n) }) {
			t.Error("expected panic for n <= 0")
		}
	}
}
// TestRandIntn tests the RandIntn function.
func TestRandIntn(t *testing.T) {
	const iters = 10000
	var counts [10]int
	for i := 0; i < iters; i++ {
		counts[RandIntn(len(counts))]++
	}
	// Each bucket should land strictly within ±10% of the expected count.
	exp := iters / len(counts)
	lower, upper := exp-exp/10, exp+exp/10
	for bucket, got := range counts {
		if got <= lower || got >= upper {
			t.Errorf("Expected range of %v-%v for index %v, got %v", lower, upper, bucket, got)
		}
	}
}
// TestRead tests that Read produces output with sufficiently high entropy.
func TestRead(t *testing.T) {
	const size = 10e3
	// Truly random bytes should be incompressible, so gzip at maximum
	// compression must not shrink them below the input size.
	var compressed bytes.Buffer
	zip, _ := gzip.NewWriterLevel(&compressed, gzip.BestCompression)
	if _, err := zip.Write(RandBytes(size)); err != nil {
		t.Fatal(err)
	}
	if err := zip.Close(); err != nil {
		t.Fatal(err)
	}
	if compressed.Len() < size {
		t.Error("supposedly high entropy bytes have been compressed!")
	}
}
// TestPerm tests the Perm function.
func TestPerm(t *testing.T) {
	chars := "abcde" // string to be permuted
	createPerm := func() string {
		out := make([]byte, len(chars))
		for dst, src := range Perm(len(chars)) {
			out[dst] = chars[src]
		}
		return string(out)
	}
	// create (factorial(len(chars)) * 100) permutations
	permCount := make(map[string]int)
	for i := 0; i < 12000; i++ {
		permCount[createPerm()]++
	}
	// we should have seen each permutation approx. 100 times
	for p, n := range permCount {
		if n < 50 || n > 150 {
			t.Errorf("saw permutation %v times: %v", n, p)
		}
	}
}
Markdown is supported
0% or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment