Commit b0ad717b authored by Luke Champine's avatar Luke Champine

simultaneous transfer+Merkle

parent 26a7acbf
......@@ -60,7 +60,7 @@ func (s *State) validProof(sp StorageProof) error {
if err != nil {
return err
}
verified := hash.VerifyReaderProof(
verified := hash.VerifySegment(
sp.Segment,
sp.HashSet,
hash.CalculateSegments(contract.FileSize),
......
......@@ -7,8 +7,7 @@ import (
)
const (
HashSize = 32
SegmentSize = 64 // Size of smallest piece of a file which gets hashed when building the Merkle tree.
HashSize = 32
)
type (
......
......@@ -64,7 +64,7 @@ func TestStorageProof(t *testing.T) {
numSegments := uint64(7)
data := make([]byte, numSegments*SegmentSize)
rand.Read(data)
rootHash, err := ReaderMerkleRoot(bytes.NewReader(data), numSegments)
rootHash, err := BytesMerkleRoot(data)
if err != nil {
t.Fatal(err)
}
......@@ -76,7 +76,7 @@ func TestStorageProof(t *testing.T) {
t.Error(err)
continue
}
if !VerifyReaderProof(baseSegment, hashSet, numSegments, i, rootHash) {
if !VerifySegment(baseSegment, hashSet, numSegments, i, rootHash) {
t.Error("Proof", i, "did not pass verification")
}
}
......
package hash
const (
SegmentSize = 64 // number of bytes that are hashed to form each base leaf of the Merkle tree
)
// Helper function for Merkle trees; takes two hashes, concatenates them,
// and hashes the result.
func JoinHash(left, right Hash) Hash {
......
......@@ -6,19 +6,33 @@ import (
"io"
)
// BytesMerkleRoot takes a byte slice and returns the merkle root created by
// CalculateSegments reports how many SegmentSize-byte leaves are needed
// to cover a file of the given size (fileSize / SegmentSize, rounded up).
// Should probably be renamed to CountLeaves() or something.
//
// TODO: Why is this in package hash?
func CalculateSegments(fileSize uint64) uint64 {
	// A trailing partial segment still occupies a full leaf.
	if fileSize%SegmentSize == 0 {
		return fileSize / SegmentSize
	}
	return fileSize/SegmentSize + 1
}
// BytesMerkleRoot takes a byte slice and returns the Merkle root created by
// splitting the slice into small pieces and then treating each piece as an
// element of the tree.
func BytesMerkleRoot(data []byte) (hash Hash, err error) {
reader := bytes.NewReader(data)
numSegments := CalculateSegments(uint64(len(data)))
return ReaderMerkleRoot(reader, numSegments)
func BytesMerkleRoot(data []byte) (Hash, error) {
return ReaderMerkleRoot(bytes.NewReader(data), uint64(len(data)))
}
// ReaderMerkleRoot splits the provided data into segments. It then recursively
// transforms these segments into a Merkle tree, and returns the root hash.
// See MerkleRoot for a diagram of how Merkle trees are constructed.
func ReaderMerkleRoot(reader io.Reader, numSegments uint64) (hash Hash, err error) {
func ReaderMerkleRoot(r io.Reader, size uint64) (Hash, error) {
return readerMerkleRoot(r, CalculateSegments(size))
}
func readerMerkleRoot(reader io.Reader, numSegments uint64) (hash Hash, err error) {
if numSegments == 0 {
err = errors.New("no data")
return
......@@ -41,30 +55,18 @@ func ReaderMerkleRoot(reader io.Reader, numSegments uint64) (hash Hash, err erro
}
// since we always read "left to right", no extra Seeking is necessary
left, err := ReaderMerkleRoot(reader, mid)
right, err := ReaderMerkleRoot(reader, numSegments-mid)
left, err := readerMerkleRoot(reader, mid)
right, err := readerMerkleRoot(reader, numSegments-mid)
hash = JoinHash(left, right)
return
}
// Calculates the number of segments in the file when building a merkle tree.
// Should probably be renamed to CountLeaves() or something.
//
// TODO: Why is this in package hash?
func CalculateSegments(fileSize uint64) (numSegments uint64) {
numSegments = fileSize / SegmentSize
if fileSize%SegmentSize != 0 {
numSegments++
}
return
}
// buildProof constructs a list of hashes using the following procedure. The
// storage proof requires traversing the Merkle tree from the proofIndex node
// to the root. On each level of the tree, we must provide the hash of the
// "sister" node. (Since this is a binary tree, the sister node is the other
// node with the same parent as us.) To obtain this hash, we call
// ReaderMerkleRoot on the segment of data corresponding to the sister. This
// readerMerkleRoot on the segment of data corresponding to the sister. This
// segment will double in size on each iteration until we reach the root.
//
// TODO: Gain higher certainty of correctness.
......@@ -78,7 +80,7 @@ func BuildReaderProof(rs io.ReadSeeker, numSegments, proofIndex uint64) (baseSeg
}
// Construct the hash set that proves the base segment is a part of the
// merkle tree of the reader. (Verifier needs to know the merkle root of
// Merkle tree of the reader. (Verifier needs to know the Merkle root of
// the file in advance.)
for size := uint64(1); size < numSegments; size <<= 1 {
// determine sister index
......@@ -103,7 +105,7 @@ func BuildReaderProof(rs io.ReadSeeker, numSegments, proofIndex uint64) (baseSeg
// calculate and append hash
var h Hash
h, err = ReaderMerkleRoot(rs, truncSize)
h, err = readerMerkleRoot(rs, truncSize)
if err != nil {
return
}
......@@ -113,7 +115,7 @@ func BuildReaderProof(rs io.ReadSeeker, numSegments, proofIndex uint64) (baseSeg
return
}
// verifyProof traverses a StorageProof, hashing elements together to produce
// VerifySegment traverses a hash set, hashing elements together to produce
// the root-level hash, which is then checked against the expected result.
// Care must be taken to ensure that the correct ordering is used when
// concatenating hashes.
......@@ -127,7 +129,7 @@ func BuildReaderProof(rs io.ReadSeeker, numSegments, proofIndex uint64) (baseSeg
// indicates "keep." I don't know why this works, I just noticed the pattern.
//
// TODO: Gain higher certainty of correctness.
func VerifyReaderProof(baseSegment [SegmentSize]byte, hashSet []Hash, numSegments, proofIndex uint64, expectedRoot Hash) bool {
func VerifySegment(baseSegment [SegmentSize]byte, hashSet []Hash, numSegments, proofIndex uint64, expectedRoot Hash) bool {
h := HashBytes(baseSegment[:])
depth := uint64(0)
......
......@@ -186,20 +186,15 @@ func (h *Host) NegotiateContract(conn net.Conn) (err error) {
if err != nil {
return
}
// Download file contents.
// TODO: calculate Merkle root simultaneously
_, err = io.CopyN(file, conn, int64(terms.FileSize))
if err != nil {
return
}
// Calculate Merkle root.
_, err = file.Seek(0, 0)
if err != nil {
return
}
merkleRoot, err := hash.ReaderMerkleRoot(file, hash.CalculateSegments(terms.FileSize))
// simultaneously download file and calculate its Merkle root.
tee := io.TeeReader(
// use a LimitedReader to ensure we don't read indefinitely
io.LimitReader(conn, int64(terms.FileSize)),
// each byte we read from tee will also be written to file
file,
)
merkleRoot, err := hash.ReaderMerkleRoot(tee, terms.FileSize)
if err != nil {
return
}
......
......@@ -60,19 +60,25 @@ func (r *Renter) negotiateContract(host modules.HostEntry, up modules.UploadPara
height := r.state.Height()
r.state.RUnlock()
// get filesize via Seek
// (these Seeks are guaranteed not to return errors)
n, _ := up.Data.Seek(0, 2)
filesize := uint64(n)
up.Data.Seek(0, 0) // seek back to beginning
// create ContractTerms
terms := modules.ContractTerms{
FileSize: up.FileSize,
FileSize: filesize,
StartHeight: height + up.Delay,
WindowSize: 0, // ??
NumWindows: 0, // ?? duration/windowsize + 1?
ClientPayout: 0, // ??
HostPayout: 0, // ??
WindowSize: 0, // ??
NumWindows: 0, // ?? duration/windowsize + 1?
ClientPayout: 0, // ??
HostPayout: host.Price, // ??
ValidProofAddress: host.CoinAddress,
MissedProofAddress: consensus.ZeroAddress,
}
// TODO: call r.hostDB.FlagHost(host.IPAddress) if negotiation unnecessful
// TODO: call r.hostDB.FlagHost(host.IPAddress) if negotiation is unsuccessful
// (and it isn't our fault)
err = host.IPAddress.Call("NegotiateContract", func(conn net.Conn) (err error) {
// send ContractTerms
......@@ -87,15 +93,14 @@ func (r *Renter) negotiateContract(host modules.HostEntry, up modules.UploadPara
if response != modules.AcceptContractResponse {
return errors.New(response)
}
// host accepted, so transmit file data
_, err = io.CopyN(conn, up.Data, int64(up.FileSize))
// reset seek position
up.Data.Seek(0, 0)
// simultaneously transmit file data and calculate Merkle root
tee := io.TeeReader(up.Data, conn)
merkleRoot, err := hash.ReaderMerkleRoot(tee, filesize)
if err != nil {
return
}
// create and transmit transaction containing file contract
txn, err := r.createContractTransaction(host, terms, up.MerkleRoot)
txn, err := r.createContractTransaction(host, terms, merkleRoot)
if err != nil {
return
}
......
......@@ -6,7 +6,6 @@ import (
"path/filepath"
"strconv"
"github.com/NebulousLabs/Sia/hash"
"github.com/NebulousLabs/Sia/modules"
)
......@@ -29,22 +28,8 @@ func (d *daemon) fileUploadHandler(w http.ResponseWriter, req *http.Request) {
}
defer file.Close()
// calculate filesize (via Seek; 2 means "relative to the end")
n, _ := file.Seek(0, 2)
filesize := uint64(n)
file.Seek(0, 0) // reset
// calculate Merkle root
merkle, err := hash.ReaderMerkleRoot(file, hash.CalculateSegments(filesize))
if err != nil {
http.Error(w, "Couldn't calculate Merkle root: "+err.Error(), 500)
return
}
err = d.renter.Upload(modules.UploadParams{
Data: file,
FileSize: filesize,
MerkleRoot: merkle,
Data: file,
// TODO: the user should probably supply these
Duration: duration,
......@@ -75,25 +60,8 @@ func (d *daemon) fileUploadPathHandler(w http.ResponseWriter, req *http.Request)
return
}
// calculate filesize
info, err := file.Stat()
if err != nil {
http.Error(w, "Couldn't stat file: "+err.Error(), 400)
return
}
filesize := uint64(info.Size())
// calculate Merkle root
merkle, err := hash.ReaderMerkleRoot(file, hash.CalculateSegments(uint64(info.Size())))
if err != nil {
http.Error(w, "Couldn't calculate Merkle root: "+err.Error(), 500)
return
}
err = d.renter.Upload(modules.UploadParams{
Data: file,
FileSize: filesize,
MerkleRoot: merkle,
Data: file,
// TODO: the user should probably supply these
Duration: duration,
......
Markdown is supported
0% or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment