Commit bb0b4780 authored by Luke Champine, committed by GitHub

Merge pull request #1694 from NebulousLabs/benches

add some persistence benchmarks to test various systems
parents 83695935 3ad08b81
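
The new benchmarks can be run locally with the same command the CI changes below add, for example:

    go test -tags="testing debug" -timeout=500s -run=XXX -bench=. ./persist

Here -run=XXX matches no tests, so only the benchmarks execute; ./persist is an illustrative package path, use ./... (as the CI does) or make bench to run everything.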
......@@ -12,7 +12,7 @@ install:
- glyphcheck ./...
- make
script: make test && make test-long && make cover
script: make test && make test-long && make cover && make bench
sudo: false
......
......@@ -22,3 +22,5 @@ install:
build_script:
- go test -tags="testing debug" -short ./...
- go test -tags="testing debug" -v -race ./...
- go test -tags="testing debug" -v -race ./...
- go test -tags="testing debug" -v -timeout=500s -run=XXX -bench=. ./...
......@@ -2,6 +2,7 @@ package consensus
import (
	"path/filepath"
	"strconv"
	"testing"

	"github.com/NebulousLabs/Sia/build"
......@@ -15,7 +16,7 @@ import (
//
// i7-4770, 1d60d69: 1.356 ms / op
func BenchmarkAcceptEmptyBlocks(b *testing.B) {
	cst, err := createConsensusSetTester(b.Name())
	cst, err := createConsensusSetTester(b.Name() + strconv.Itoa(b.N))
	if err != nil {
		b.Fatal("Error creating tester: " + err.Error())
	}
......@@ -77,7 +78,7 @@ func BenchmarkAcceptEmptyBlocks(b *testing.B) {
//
// i7-4770, 1d60d69: 3.579 ms / op
func BenchmarkAcceptSmallBlocks(b *testing.B) {
	cst, err := createConsensusSetTester(b.Name())
	cst, err := createConsensusSetTester(b.Name() + strconv.Itoa(b.N))
	if err != nil {
		b.Fatal(err)
	}
......
......@@ -13,7 +13,7 @@ import (
// i7-4770, 1d60d69: 22.883 ms / op
func BenchmarkCreateServerTester(b *testing.B) {
	for i := 0; i < b.N; i++ {
		cst, err := createConsensusSetTester(b.Name() + strconv.Itoa(i))
		cst, err := createConsensusSetTester(b.Name() + strconv.Itoa(b.N) + strconv.Itoa(i))
		if err != nil {
			b.Fatal(err)
		}
......
package persist

// disk_test.go probes some of the disk operations that are very commonly used
// within Sia, namely Read, Write, Truncate, WriteAt (random), and ReadAt
// (random).

import (
	"os"
	"path/filepath"
	"testing"

	"github.com/NebulousLabs/Sia/build"
	"github.com/NebulousLabs/fastrand"
)

// BenchmarkWrite512MiB checks how long it takes to write 512MiB sequentially.
func BenchmarkWrite512MiB(b *testing.B) {
	testDir := build.TempDir("persist", b.Name())
	err := os.MkdirAll(testDir, 0700)
	if err != nil {
		b.Fatal(err)
	}
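	// Each iteration of this benchmark writes 1 << 29 bytes (512 MiB), so the
	// framework reports the result as MB/s of write throughput.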
	b.SetBytes(1 << 29)
	filename := filepath.Join(testDir, "512MiB.file")
	b.ResetTimer()
	for i := 0; i < b.N; i++ {
		// Make the file.
		f, err := os.Create(filename)
		if err != nil {
			b.Fatal(err)
		}
		// 2^7 writes of 4MiB.
		for i := 0; i < 1<<7; i++ {
			// Get the entropy separate from the timer.
			b.StopTimer()
			data := fastrand.Bytes(1 << 22)
			b.StartTimer()
			_, err = f.Write(data)
			if err != nil {
				b.Fatal(err)
			}
			// Sync after every write.
			err = f.Sync()
			if err != nil {
				b.Fatal(err)
			}
		}
		// Close the file before iterating.
		err = f.Close()
		if err != nil {
			b.Fatal(err)
		}
	}
	err = os.Remove(filename)
	if err != nil {
		b.Fatal(err)
	}
}

// BenchmarkWrite512MiBTrunc checks how long it takes to extend a file to
// 512MiB using stepwise truncation.
func BenchmarkWrite512MiBTrunc(b *testing.B) {
	testDir := build.TempDir("persist", b.Name())
	err := os.MkdirAll(testDir, 0700)
	if err != nil {
		b.Fatal(err)
	}
	b.SetBytes(1 << 29)
	filename := filepath.Join(testDir, "512MiB.file")
	b.ResetTimer()
	for i := 0; i < b.N; i++ {
		// Make the file.
		f, err := os.Create(filename)
		if err != nil {
			b.Fatal(err)
		}
		// 2^7 truncations, each extending the file by 4MiB.
		for i := 0; i < 1<<7; i++ {
			// Extend the file through truncation.
			err = f.Truncate(int64((i + 1) * 1 << 22))
			if err != nil {
				b.Fatal(err)
			}
			// Sync after every extension.
			err = f.Sync()
			if err != nil {
				b.Fatal(err)
			}
		}
		// Close the file before iterating.
		err = f.Close()
		if err != nil {
			b.Fatal(err)
		}
	}
	err = os.Remove(filename)
	if err != nil {
		b.Fatal(err)
	}
}

// BenchmarkWrite512MiBRand checks how long it takes to write 512MiB randomly.
func BenchmarkWrite512MiBRand(b *testing.B) {
	testDir := build.TempDir("persist", b.Name())
	err := os.MkdirAll(testDir, 0700)
	if err != nil {
		b.Fatal(err)
	}
	b.SetBytes(1 << 29)
	filename := filepath.Join(testDir, "512MiB.file")
	b.ResetTimer()
	for i := 0; i < b.N; i++ {
		// Make the file.
		f, err := os.Create(filename)
		if err != nil {
			b.Fatal(err)
		}
		// 2^7 writes of 4MiB.
		for i := 0; i < 1<<7; i++ {
			// Get the entropy separate from the timer.
			b.StopTimer()
			data := fastrand.Bytes(1 << 22)
			offset := int64(fastrand.Intn(1 << 6))
			offset *= 1 << 22
			b.StartTimer()
			_, err = f.WriteAt(data, offset)
			if err != nil {
				b.Fatal(err)
			}
			// Sync after every write.
			err = f.Sync()
			if err != nil {
				b.Fatal(err)
			}
		}
		// Close the file before iterating.
		err = f.Close()
		if err != nil {
			b.Fatal(err)
		}
	}
	err = os.Remove(filename)
	if err != nil {
		b.Fatal(err)
	}
}

// BenchmarkRead512MiB checks how long it takes to read 512MiB sequentially.
func BenchmarkRead512MiB(b *testing.B) {
	testDir := build.TempDir("persist", b.Name())
	err := os.MkdirAll(testDir, 0700)
	if err != nil {
		b.Fatal(err)
	}
	b.SetBytes(1 << 29)
	// Make the file.
	filename := filepath.Join(testDir, "512MiB.file")
	f, err := os.Create(filename)
	if err != nil {
		b.Fatal(err)
	}
	// 2^7 writes of 4MiB.
	for i := 0; i < 1<<7; i++ {
		// Get the entropy separate from the timer.
		b.StopTimer()
		data := fastrand.Bytes(1 << 22)
		b.StartTimer()
		_, err = f.Write(data)
		if err != nil {
			b.Fatal(err)
		}
		// Sync after every write.
		err = f.Sync()
		if err != nil {
			b.Fatal(err)
		}
	}
	// Close the file.
	err = f.Close()
	if err != nil {
		b.Fatal(err)
	}
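	// Note that the file was written just above, so reads may be served from
	// the OS page cache rather than from the disk itself.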
	// Check the sequential read speed.
	b.ResetTimer()
	for i := 0; i < b.N; i++ {
		// Open the file.
		f, err := os.Open(filename)
		if err != nil {
			b.Fatal(err)
		}
		// Read the full file 4 MiB at a time.
		for i := 0; i < 1<<7; i++ {
			data := make([]byte, 1<<22)
			_, err = f.Read(data)
			if err != nil {
				b.Fatal(err)
			}
		}
		err = f.Close()
		if err != nil {
			b.Fatal(err)
		}
	}
	err = os.Remove(filename)
	if err != nil {
		b.Fatal(err)
	}
}

// BenchmarkRead512MiBRand checks how long it takes to read 512MiB randomly.
func BenchmarkRead512MiBRand(b *testing.B) {
	testDir := build.TempDir("persist", b.Name())
	err := os.MkdirAll(testDir, 0700)
	if err != nil {
		b.Fatal(err)
	}
	b.SetBytes(1 << 29)
	// Make the file.
	filename := filepath.Join(testDir, "512MiB.file")
	f, err := os.Create(filename)
	if err != nil {
		b.Fatal(err)
	}
	// 2^7 writes of 4MiB.
	for i := 0; i < 1<<7; i++ {
		// Get the entropy separate from the timer.
		b.StopTimer()
		data := fastrand.Bytes(1 << 22)
		b.StartTimer()
		_, err = f.Write(data)
		if err != nil {
			b.Fatal(err)
		}
		// Sync after every write.
		err = f.Sync()
		if err != nil {
			b.Fatal(err)
		}
	}
	// Close the file.
	err = f.Close()
	if err != nil {
		b.Fatal(err)
	}
	// Check the random read speed.
	b.ResetTimer()
	for i := 0; i < b.N; i++ {
		// Open the file.
		f, err := os.Open(filename)
		if err != nil {
			b.Fatal(err)
		}
		// Read 4 MiB at a time from random offsets within the file.
		for i := 0; i < 1<<7; i++ {
			offset := int64(fastrand.Intn(1 << 7))
			offset *= 1 << 22
			data := make([]byte, 1<<22)
			_, err = f.ReadAt(data, offset)
			if err != nil {
				b.Fatal(err)
			}
		}
		err = f.Close()
		if err != nil {
			b.Fatal(err)
		}
	}
	err = os.Remove(filename)
	if err != nil {
		b.Fatal(err)
	}
}

// BenchmarkTruncate512MiB checks how long it takes to truncate a 512 MiB file.
func BenchmarkTruncate512MiB(b *testing.B) {
	testDir := build.TempDir("persist", b.Name())
	err := os.MkdirAll(testDir, 0700)
	if err != nil {
		b.Fatal(err)
	}
	b.SetBytes(1 << 29)
	filename := filepath.Join(testDir, "512MiB.file")
	// Check the truncate speed.
	b.ResetTimer()
	for i := 0; i < b.N; i++ {
		// Make the file separate from the timer; only the truncation below is
		// measured.
		b.StopTimer()
		f, err := os.Create(filename)
		if err != nil {
			b.Fatal(err)
		}
		// 2^7 writes of 4MiB.
		for i := 0; i < 1<<7; i++ {
			data := fastrand.Bytes(1 << 22)
			_, err = f.Write(data)
			if err != nil {
				b.Fatal(err)
			}
		}
		// Sync after writing.
		err = f.Sync()
		if err != nil {
			b.Fatal(err)
		}
		// Close the file.
		err = f.Close()
		if err != nil {
			b.Fatal(err)
		}
		b.StartTimer()
		// Open the file.
		f, err = os.OpenFile(filename, os.O_RDWR, 0600)
		if err != nil {
			b.Fatal(err)
		}
		// Truncate the file.
		err = f.Truncate(0)
		if err != nil {
			b.Fatal(err)
		}
		// Sync.
		err = f.Sync()
		if err != nil {
			b.Fatal(err)
		}
		// Close.
		err = f.Close()
		if err != nil {
			b.Fatal(err)
		}
	}
	err = os.Remove(filename)
	if err != nil {
		b.Fatal(err)
	}
}
......@@ -121,7 +121,7 @@ func TestTryMutexTimed(t *testing.T) {
	if wait < time.Millisecond*450 {
		t.Error("lock did not wait the correct amount of time before timing out", wait)
	}
	if wait > time.Millisecond*700 {
	if wait > time.Millisecond*900 {
		t.Error("lock waited too long before timing out", wait)
	}
......@@ -154,7 +154,7 @@ func TestTryMutexTimedConcurrent(t *testing.T) {
	if wait < time.Millisecond*450 {
		t.Error("lock did not wait the correct amount of time before timing out:", wait)
	}
	if wait > time.Millisecond*800 {
	if wait > time.Millisecond*900 {
		t.Error("lock waited too long before timing out", wait)
	}
......@@ -167,9 +167,9 @@ func TestTryMutexTimedConcurrent(t *testing.T) {
		// the lock will be idle for 500 milliseconds.
		t.Error("Lock should have timed out")
	}
	if !tm.TryLockTimed(time.Millisecond * 750) {
	if !tm.TryLockTimed(time.Millisecond * 950) {
		// Lock should be successful - the above thread should finish in under
		// 750 milliseconds.
		// 950 milliseconds.
		t.Error("Lock should have been successful")
	}
	tm.Unlock()
......