Commit a9f03fa5 authored by cznic

release v1.0.5: Unvendor lldb, use stable lldb. Closes #128.

parent 2cbfdfc3
......@@ -2,13 +2,18 @@
# Use of this source code is governed by a BSD-style
# license that can be found in the LICENSE file.
.PHONY: all clean nuke
.PHONY: all clean cover cpu editor internalError later mem nuke todo edit
grep=--include=*.go --include=*.l --include=*.y --include=*.yy --exclude=ql.y
ngrep='TODOOK\|parser\.go\|scanner\.go\|.*_string\.go'
all: editor scanner.go parser.go
go build
go vet || true
golint
go vet 2>&1 | grep -v $(ngrep) || true
golint 2>&1 | grep -v $(ngrep) || true
make todo
unused . || true
misspell *.go
gosimple || true
go install ./...
bench: all
......@@ -16,7 +21,7 @@ bench: all
clean:
go clean
rm -f *~ y.go y.tab.c *.out ql.test
rm -f *~ y.go y.tab.c *.out *.test
coerce.go: helper/helper.go
if [ -f coerce.go ] ; then rm coerce.go ; fi
......@@ -25,10 +30,12 @@ coerce.go: helper/helper.go
cover:
t=$(shell tempfile) ; go test -coverprofile $$t && go tool cover -html $$t && unlink $$t
cpu: ql.test
go test -c
./$< -test.bench . -test.cpuprofile cpu.out
go tool pprof --lines $< cpu.out
cpu: clean
go test -run @ -bench . -cpuprofile cpu.out
go tool pprof -lines *.test cpu.out
edit:
gvim -p Makefile *.l *.y *.go
editor: ql.y scanner.go parser.go coerce.go
gofmt -s -l -w *.go
......@@ -38,12 +45,15 @@ editor: ql.y scanner.go parser.go coerce.go
internalError:
egrep -ho '"internal error.*"' *.go | sort | cat -n
mem: ql.test
go test -c
./$< -test.bench . -test.memprofile mem.out
go tool pprof --lines --web --alloc_space $< mem.out
later:
@grep -n $(grep) LATER * || true
@grep -n $(grep) MAYBE * || true
mem: clean
go test -run @ -bench . -memprofile mem.out -memprofilerate 1 -timeout 24h
go tool pprof -lines -web -alloc_space *.test mem.out
nuke:
nuke: clean
go clean -i
parser.go: parser.y
......@@ -53,8 +63,6 @@ parser.go: parser.y
rm -f $$a
sed -i -e 's|//line.*||' -e 's/yyEofCode/yyEOFCode/' $@
ql.test: all
ql.y: doc.go
sed -n '1,/^package/ s/^\/\/ //p' < $< \
| ebnf2y -o $@ -oe $*.ebnf -start StatementList -pkg $* -p _
......@@ -64,11 +72,7 @@ scanner.go: scanner.l parser.go
golex -o $@ $<
todo:
@grep -n ^[[:space:]]*_[[:space:]]*=[[:space:]][[:alpha:]][[:alnum:]]* *.go *.l parser.y || true
@grep -n TODO *.go *.l parser.y testdata.ql || true
@grep -n BUG *.go *.l parser.y || true
@grep -n println *.go *.l parser.y || true
later:
@grep -n LATER *.go *.l parser.y || true
@grep -n MAYBE *.go *.l parser.y || true
@grep -nr $(grep) ^[[:space:]]*_[[:space:]]*=[[:space:]][[:alpha:]][[:alnum:]]* * || true
@grep -nr $(grep) TODO * || true
@grep -nr $(grep) BUG * || true
@grep -nr $(grep) [^[:alpha:]]println * || true
......@@ -34,6 +34,7 @@ const benchScale = 1e6
func init() {
log.SetFlags(log.Flags() | log.Lshortfile)
isTesting = true
use(dieHard, caller, dumpTables2, dumpTables3, dumpFields, dumpFlds, dumpCols, typeof, stypeof)
}
func dieHard(exitValue int) {
......@@ -319,7 +320,7 @@ func rnds16(rng *rand.Rand, n int) string {
var (
benchmarkScaleOnce sync.Once
benchmarkSelectOnce = map[string]sync.Once{}
benchmarkSelectOnce = map[string]bool{}
)
func benchProlog(b *testing.B) {
......@@ -335,13 +336,12 @@ func benchmarkSelect(b *testing.B, n int, sel List, ts testDB) {
if testing.Verbose() {
benchProlog(b)
id := fmt.Sprintf("%T|%d", ts, n)
once := benchmarkSelectOnce[id]
once.Do(func() {
if !benchmarkSelectOnce[id] {
b.Logf(`Having a table of %d records, each of size 1kB, measure the performance of
%s
`, n, sel)
})
benchmarkSelectOnce[id] = once
}
benchmarkSelectOnce[id] = true
}
db, err := ts.setup()
......@@ -463,19 +463,18 @@ func TestString(t *testing.T) {
}
}
var benchmarkInsertOnce = map[string]sync.Once{}
var benchmarkInsertOnce = map[string]bool{}
func benchmarkInsert(b *testing.B, batch, total int, ts testDB) {
if testing.Verbose() {
benchProlog(b)
id := fmt.Sprintf("%T|%d|%d", ts, batch, total)
once := benchmarkInsertOnce[id]
once.Do(func() {
if !benchmarkInsertOnce[id] {
b.Logf(`In batches of %d record(s), insert a total of %d records, each of size 1kB, into a table.
`, batch, total)
})
benchmarkInsertOnce[id] = once
}
benchmarkInsertOnce[id] = true
}
if total%batch != 0 {
......@@ -1221,14 +1220,13 @@ func TestIndices(t *testing.T) {
testIndices(db, t)
}
var benchmarkInsertBoolOnce = map[string]sync.Once{}
var benchmarkInsertBoolOnce = map[string]bool{}
func benchmarkInsertBool(b *testing.B, db *DB, size int, selectivity float64, index bool, teardown func()) {
if testing.Verbose() {
benchProlog(b)
id := fmt.Sprintf("%t|%d|%g|%t", db.isMem, size, selectivity, index)
once := benchmarkInsertBoolOnce[id]
once.Do(func() {
if !benchmarkInsertBoolOnce[id] {
s := "INDEXED"
if !index {
s = "NON " + s
......@@ -1236,8 +1234,8 @@ func benchmarkInsertBool(b *testing.B, db *DB, size int, selectivity float64, in
b.Logf(`Insert %d records into a table having a single bool %s column. Batch size: 1 record.
`, size, s)
})
benchmarkInsertBoolOnce[id] = once
}
benchmarkInsertBoolOnce[id] = true
}
if teardown != nil {
......@@ -1401,7 +1399,7 @@ func BenchmarkInsertBoolFileX1e3(b *testing.B) {
benchmarkInsertBoolFile(b, 1e3, 0.5, true)
}
var benchmarkSelectBoolOnce = map[string]sync.Once{}
var benchmarkSelectBoolOnce = map[string]bool{}
func benchmarkSelectBool(b *testing.B, db *DB, size int, selectivity float64, index bool, teardown func()) {
sel, err := Compile("SELECT * FROM t WHERE b;")
......@@ -1412,8 +1410,7 @@ func benchmarkSelectBool(b *testing.B, db *DB, size int, selectivity float64, in
if testing.Verbose() {
benchProlog(b)
id := fmt.Sprintf("%t|%d|%g|%t", db.isMem, size, selectivity, index)
once := benchmarkSelectBoolOnce[id]
once.Do(func() {
if !benchmarkSelectBoolOnce[id] {
s := "INDEXED"
if !index {
s = "NON " + s
......@@ -1422,8 +1419,8 @@ func benchmarkSelectBool(b *testing.B, db *DB, size int, selectivity float64, in
%.0f%% of them are true. Measure the performance of
%s
`, s, size, 100*selectivity, sel)
})
benchmarkSelectBoolOnce[id] = once
}
benchmarkSelectBoolOnce[id] = true
}
if teardown != nil {
......@@ -1743,14 +1740,13 @@ func TestIndex(t *testing.T) {
}
}
var benchmarkCrossJoinOnce = map[string]sync.Once{}
var benchmarkCrossJoinOnce = map[string]bool{}
func benchmarkCrossJoin(b *testing.B, db *DB, create, sel List, size1, size2 int, index bool, teardown func()) {
if testing.Verbose() {
benchProlog(b)
id := fmt.Sprintf("%t|%d|%d|%t", db.isMem, size1, size2, index)
once := benchmarkCrossJoinOnce[id]
once.Do(func() {
if !benchmarkCrossJoinOnce[id] {
s := "INDEXED "
if !index {
s = "NON " + s
......@@ -1758,8 +1754,8 @@ func benchmarkCrossJoin(b *testing.B, db *DB, create, sel List, size1, size2 int
b.Logf(`Fill two %stables with %d and %d records of random numbers [0, 1). Measure the performance of
%s
`, s, size1, size2, sel)
})
benchmarkCrossJoinOnce[id] = once
}
benchmarkCrossJoinOnce[id] = true
}
if teardown != nil {
......@@ -2066,15 +2062,6 @@ func dumpFields(f []*fld) string {
return strings.Join(a, ", ")
}
func rndBytes(n int, seed int64) []byte {
rng := rand.New(rand.NewSource(seed))
b := make([]byte, n)
for i := range b {
b[i] = byte(rng.Int())
}
return b
}
func TestIssue50(t *testing.T) { // https://github.com/cznic/ql/issues/50
if testing.Short() {
t.Skip("skipping test in short mode.")
......
......@@ -221,7 +221,7 @@ record handle} and the B+Tree value is not used.
+------+-----------------+ +--------------+
If the indexed values are not all NULL then key of the B+Tree key are the indexed
values and the B+Tree value is the record handle.
values and the B+Tree value is the record handle.
B+Tree key B+Tree value
+----------------+ +---------------+
......@@ -262,7 +262,7 @@ out are stripped off and "resupplied" on decoding transparently. See also
blob.go. If the length of the resulting slice is <= shortBlob, the first and
only chunk is the scalar encoding of
[]interface{}{typeTag, slice}. // initial (and last) chunk
The length of slice can be zero (for blob("")). If the resulting slice is long
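As a side note to this design text, the single-chunk case above can be reproduced with the stable github.com/cznic/lldb package that this release switches to. This is only a sketch: the tagBlob value is a made-up placeholder, not one of ql's real type tags.

package main

import (
	"fmt"

	"github.com/cznic/lldb"
)

// tagBlob is a hypothetical type tag used only for this illustration; ql's
// real tags are internal to its storage layer.
const tagBlob = int64(0x100)

func main() {
	// A short blob fits in a single chunk: the scalar encoding of
	// []interface{}{typeTag, slice}, produced here with lldb.EncodeScalars.
	chunk, err := lldb.EncodeScalars(tagBlob, []byte("hello"))
	if err != nil {
		panic(err)
	}
	fmt.Printf("chunk: % x\n", chunk)

	// DecodeScalars reverses the encoding and yields the tag and the slice.
	scalars, err := lldb.DecodeScalars(chunk)
	if err != nil {
		panic(err)
	}
	fmt.Println(scalars)
}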
......@@ -285,9 +285,9 @@ Links
Referenced from above:
[0]: http://godoc.org/github.com/cznic/exp/lldb#hdr-Block_handles
[1]: http://godoc.org/github.com/cznic/exp/lldb#EncodeScalars
[2]: http://godoc.org/github.com/cznic/exp/lldb#BTree
[0]: http://godoc.org/github.com/cznic/lldb#hdr-Block_handles
[1]: http://godoc.org/github.com/cznic/lldb#EncodeScalars
[2]: http://godoc.org/github.com/cznic/lldb#BTree
Rationale
......
......@@ -14,6 +14,11 @@
//
// Change list
//
// 2016-07-11: Release v1.0.5 undoes vendoring of lldb. QL now uses stable lldb
// (github.com/cznic/lldb).
//
// https://github.com/cznic/ql/issues/128
//
// 2016-07-06: Release v1.0.4 fixes a panic when closing the WAL file.
//
// https://github.com/cznic/ql/pull/127
......@@ -804,7 +809,7 @@
//
// expr1 LIKE expr2
//
// yeild a boolean value true if expr2, a regular expression, matches expr1
// yield a boolean value true if expr2, a regular expression, matches expr1
// (see also [6]). Both expressions must be of type string. If any one of the
// expressions is NULL the result is NULL.
//
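A minimal sketch of the LIKE operator driven from Go, assuming ql's public OpenMem, Run, NewRWCtx and Recordset.Do API (NewRWCtx and Do also appear elsewhere in this commit); the table, data and pattern are illustrative only.

package main

import (
	"fmt"

	"github.com/cznic/ql"
)

func main() {
	db, err := ql.OpenMem()
	if err != nil {
		panic(err)
	}

	// Populate a small table inside a transaction.
	if _, _, err = db.Run(ql.NewRWCtx(), `
		BEGIN TRANSACTION;
			CREATE TABLE t (name string);
			INSERT INTO t VALUES ("foo"), ("bar"), ("foobar");
		COMMIT;
	`); err != nil {
		panic(err)
	}

	// LIKE's right operand is a regular expression, as documented above.
	rs, _, err := db.Run(nil, `SELECT name FROM t WHERE name LIKE "^foo";`)
	if err != nil {
		panic(err)
	}

	// Print the matching rows ("foo" and "foobar"); order is not significant.
	if err = rs[0].Do(false, func(data []interface{}) (bool, error) {
		fmt.Println(data)
		return true, nil
	}); err != nil {
		panic(err)
	}
}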
......@@ -891,7 +896,7 @@
//
// expr IS NOT NULL // case B
//
// yeild a boolean value true if expr does not have a specific type (case A) or
// yield a boolean value true if expr does not have a specific type (case A) or
// if expr has a specific type (case B). In other cases the result is a boolean
// value false.
//
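A similar sketch for case B, under the same API assumptions as above; the schema and data are made up for illustration.

package main

import (
	"fmt"

	"github.com/cznic/ql"
)

func main() {
	db, err := ql.OpenMem()
	if err != nil {
		panic(err)
	}

	if _, _, err = db.Run(ql.NewRWCtx(), `
		BEGIN TRANSACTION;
			CREATE TABLE t (c int);
			INSERT INTO t VALUES (1), (NULL);
		COMMIT;
	`); err != nil {
		panic(err)
	}

	// Case B above: the predicate is true only for rows where c has a
	// specific type, so the NULL row is filtered out.
	rs, _, err := db.Run(nil, `SELECT c FROM t WHERE c IS NOT NULL;`)
	if err != nil {
		panic(err)
	}

	if err = rs[0].Do(false, func(data []interface{}) (bool, error) {
		fmt.Println(data) // [1]
		return true, nil
	}); err != nil {
		panic(err)
	}
}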
......
......@@ -10,7 +10,6 @@ import (
"io"
"math"
"math/big"
"strings"
"time"
)
......@@ -2764,38 +2763,6 @@ var isSystemName = map[string]bool{
"__Table": true,
}
func qualifier(s string) string {
if pos := strings.IndexByte(s, '.'); pos >= 0 {
s = s[:pos]
}
return s
}
func mustQualifier(s string) string {
q := qualifier(s)
if q == s {
panic("internal error 068")
}
return q
}
func selector(s string) string {
if pos := strings.IndexByte(s, '.'); pos >= 0 {
s = s[pos+1:]
}
return s
}
func mustSelector(s string) string {
q := selector(s)
if q == s {
panic("internal error 053")
}
return q
}
func qnames(l []string) []string {
r := make([]string, len(l))
for i, v := range l {
......
......@@ -135,12 +135,6 @@ func mentionedColumns(e expression) map[string]struct{} {
return m
}
func mentionedQColumns(e expression) map[string]struct{} {
m := map[string]struct{}{}
mentionedColumns0(e, true, false, m)
return m
}
func staticExpr(e expression) (expression, error) {
if e.isStatic() {
v, err := e.eval(nil, nil)
......@@ -166,11 +160,6 @@ type (
idealUint uint64
)
type exprTab struct {
expr expression
table string
}
type pexpr struct {
expr expression
}
......@@ -3397,20 +3386,6 @@ func (u *unaryOperation) String() string {
}
}
// !ident
func (u *unaryOperation) isNotQIdent() (bool, string, expression) {
if u.op != '!' {
return false, "", nil
}
id, ok := u.v.(*ident)
if ok && id.isQualified() {
return true, mustQualifier(id.s), &unaryOperation{'!', &ident{mustSelector(id.s)}}
}
return false, "", nil
}
func (u *unaryOperation) eval(execCtx *execCtx, ctx map[interface{}]interface{}) (r interface{}, err error) {
defer func() {
if e := recover(); e != nil {
......
......@@ -19,9 +19,9 @@ import (
"sync"
"time"
"github.com/cznic/lldb"
"github.com/cznic/mathutil"
"github.com/cznic/ql/vendored/github.com/camlistore/go4/lock"
"github.com/cznic/ql/vendored/github.com/cznic/exp/lldb"
)
const (
......
......@@ -42,7 +42,6 @@ type HTTPFile struct {
isFile bool
name string
off int
sz int
}
// Close implements http.File.
......@@ -212,7 +211,7 @@ func (db *DB) NewHTTPFS(query string) (*HTTPFS, error) {
// The elements in a file path are separated by slash ('/', U+002F) characters,
// regardless of host operating system convention.
func (f *HTTPFS) Open(name string) (http.File, error) {
if filepath.Separator != '/' && strings.IndexRune(name, filepath.Separator) >= 0 ||
if filepath.Separator != '/' && strings.Contains(name, string(filepath.Separator)) ||
strings.Contains(name, "\x00") {
return nil, fmt.Errorf("invalid character in file path: %q", name)
}
......@@ -264,7 +263,7 @@ func (f *HTTPFS) Open(name string) (http.File, error) {
n++
switch name := data[0].(type) {
case string:
if filepath.Separator != '/' && strings.IndexRune(name, filepath.Separator) >= 0 ||
if filepath.Separator != '/' && strings.Contains(name, string(filepath.Separator)) ||
strings.Contains(name, "\x00") {
return false, fmt.Errorf("invalid character in file path: %q", name)
}
......
......@@ -684,7 +684,7 @@ func TestMarshal(t *testing.T) {
case []byte:
switch y := e.(type) {
case []byte:
if bytes.Compare(x, y) != 0 {
if !bytes.Equal(x, y) {
t.Fatal(iTest, x, y)
}
default:
......
......@@ -4,7 +4,7 @@
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// Inital yacc source generated by ebnf2y[1]
// Initial yacc source generated by ebnf2y[1]
// at 2013-10-04 23:10:47.861401015 +0200 CEST
//
// $ ebnf2y -o ql.y -oe ql.ebnf -start StatementList -pkg ql -p _
......
......@@ -3,7 +3,7 @@
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// Inital yacc source generated by ebnf2y[1]
// Initial yacc source generated by ebnf2y[1]
// at 2013-10-04 23:10:47.861401015 +0200 CEST
//
// $ ebnf2y -o ql.y -oe ql.ebnf -start StatementList -pkg ql -p _
......
......@@ -144,8 +144,8 @@ func (l List) String() string {
return b.String()
}
// IsExplainStmt reports whether l is a single EXPLAIN statment or a single EXPLAIN
// statment enclosed in a transaction.
// IsExplainStmt reports whether l is a single EXPLAIN statement or a single EXPLAIN
// statement enclosed in a transaction.
func (l List) IsExplainStmt() bool {
switch len(l.l) {
case 1:
......@@ -212,7 +212,7 @@ type TCtx struct {
// unique context.
func NewRWCtx() *TCtx { return &TCtx{} }
// Recordset is a result of a select statment. It can call a user function for
// Recordset is a result of a select statement. It can call a user function for
// every row (record) in the set using the Do method.
//
// Recordsets can be safely reused. Evaluation of the rows is performed lazily.
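A hedged sketch of the Do method referred to above. It assumes that passing true for names first delivers the column names and that returning false from the callback stops the lazy evaluation early; the schema is illustrative only.

package main

import (
	"fmt"

	"github.com/cznic/ql"
)

func main() {
	db, err := ql.OpenMem()
	if err != nil {
		panic(err)
	}

	if _, _, err = db.Run(ql.NewRWCtx(), `
		BEGIN TRANSACTION;
			CREATE TABLE t (i int);
			INSERT INTO t VALUES (1), (2), (3);
		COMMIT;
	`); err != nil {
		panic(err)
	}

	rs, _, err := db.Run(nil, `SELECT i FROM t;`)
	if err != nil {
		panic(err)
	}

	n := 0
	// With names == true the callback is first invoked with the column
	// names; returning false stops the (lazy) evaluation early.
	if err = rs[0].Do(true, func(data []interface{}) (bool, error) {
		fmt.Println(data)
		n++
		return n < 3, nil // stop after the names row and two records
	}); err != nil {
		panic(err)
	}
}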
......@@ -672,16 +672,6 @@ func (r tableRset) plan(ctx *execCtx) (plan, error) {
return rs, nil
}
func findFldIndex(fields []*fld, name string) int {
for i, f := range fields {
if f.name == name {
return i
}
}
return -1
}
func findFld(fields []*fld, name string) (f *fld) {
for _, f = range fields {
if f.name == name {
......@@ -1276,7 +1266,7 @@ func (db *DB) run1(pc *TCtx, s stmt, arg ...interface{}) (rs Recordset, tnla, tn
}
if pc != db.cc {
for db.rw == true {
for db.rw {
db.mu.Unlock() // Transaction isolation
db.mu.Lock()
}
......@@ -1501,7 +1491,7 @@ type IndexInfo struct {
Name string // Index name
Table string // Table name.
Column string // Column name.
Unique bool // Wheter the index is unique.
Unique bool // Whether the index is unique.
ExpressionList []string // Index expression list.
}
......
......@@ -3,7 +3,7 @@
//TODO Put your favorite license here
// yacc source generated by ebnf2y[1]
// at 2015-12-07 11:13:21.828981967 +0100 CET
// at 2016-07-11 14:15:11.623998412 +0200 CEST
//
// $ ebnf2y -o ql.y -oe ql.ebnf -start StatementList -pkg ql -p _
//
......
......@@ -10,8 +10,6 @@ import (
"unicode"
)
var bad = int(unicode.ReplacementChar)
func tok2name(i int) string {
if i == unicode.ReplacementChar {
return "<?>"
......
......@@ -8,7 +8,6 @@ import (
"bytes"
"fmt"
"strings"
"sync"
"github.com/cznic/strutil"
)
......@@ -716,7 +715,6 @@ type selectStmt struct {
group *groupByRset
hasAggregates bool
limit *limitRset
mu sync.Mutex
offset *offsetRset
order *orderByRset
where *whereRset
......
......@@ -137,8 +137,7 @@ type table struct {
defaults []expression
}
func (t *table) hasIndices() bool { return len(t.indices) != 0 || len(t.indices2) != 0 }
func (t *table) hasIndices2() bool { return len(t.indices2) != 0 }
func (t *table) hasIndices() bool { return len(t.indices) != 0 || len(t.indices2) != 0 }
func (t *table) constraintsAndDefaults(ctx *execCtx) error {
if isSystemName[t.name] {
......@@ -747,14 +746,6 @@ func (t *table) addRecord(execCtx *execCtx, r []interface{}) (id int64, err erro
return
}
func (t *table) flds() (r []*fld) {
r = make([]*fld, len(t.cols))
for i, v := range t.cols {
r[i] = &fld{expr: &ident{v.name}, name: v.name}
}
return
}
func (t *table) fieldNames() []string {
r := make([]string, len(t.cols))
for i, v := range t.cols {
......@@ -802,10 +793,10 @@ type root struct {
head int64 // Single linked table list
lastInsertID int64
parent *root
rowsAffected int64 //LATER implement
store storage
tables map[string]*table
thead *table
//rowsAffected int64 //LATER implement
store storage
tables map[string]*table
thead *table
}
func newRoot(store storage) (r *root, err error) {
......
// Copyright 2014 The lldb Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// Two Phase Commit & Structural ACID
package lldb
import (
"bufio"
"encoding/binary"
"fmt"
"io"
"os"
"github.com/cznic/fileutil"
"github.com/cznic/mathutil"
)
var _ Filer = &ACIDFiler0{} // Ensure ACIDFiler0 is a Filer
type acidWrite struct {
b []byte
off int64
}
type acidWriter0 ACIDFiler0
func (a *acidWriter0) WriteAt(b []byte, off int64) (n int, err error) {
f := (*ACIDFiler0)(a)
if f.bwal == nil { // new epoch
f.data = f.data[:0]
f.bwal = bufio.NewWriter(f.wal)
if err = a.writePacket([]interface{}{wpt00Header, walTypeACIDFiler0, ""}); err != nil {
return
}
}
if err = a.writePacket([]interface{}{wpt00WriteData, b, off}); err != nil {
return
}
f.data = append(f.data, acidWrite{b, off})
return len(b), nil
}
func (a *acidWriter0) writePacket(items []interface{}) (err error) {
f := (*ACIDFiler0)(a)
b, err := EncodeScalars(items...)
if err != nil {
return
}
var b4 [4]byte
binary.BigEndian.PutUint32(b4[:], uint32(len(b)))
if _, err = f.bwal.Write(b4[:]); err != nil {
return
}
if _, err = f.bwal.Write(b); err != nil {
return
}
if m := (4 + len(b)) % 16; m != 0 {
var pad [15]byte
_, err = f.bwal.Write(pad[:16-m])
}
return
}
// WAL Packet Tags
const (
wpt00Header = iota
wpt00WriteData
wpt00Checkpoint
)
const (
walTypeACIDFiler0 = iota
)
// ACIDFiler0 is a very simple, synchronous implementation of 2PC. It uses a
// single write ahead log file to provide the structural atomicity
// (BeginUpdate/EndUpdate/Rollback) and durability (DB can be recovered from
// WAL if a crash occurred).
//
// ACIDFiler0 is a Filer.
//
// NOTE: Durable synchronous 2PC involves three fsyncs in this implementation
// (WAL, DB, zero truncated WAL). Where possible, it's recommended to collect
// transactions for, say, one second before performing the two phase commit as
// the typical performance for rotational hard disks is about a few tens of
// fsyncs per second at most. For an example of such a collective transaction
// approach please see the collecting FSM STT in Dbm's documentation[1].
//
// [1]: http://godoc.org/github.com/cznic/exp/dbm
type ACIDFiler0 struct {
*RollbackFiler
wal *os.File
bwal *bufio.Writer
data []acidWrite
testHook bool // keeps WAL untruncated (once)
peakWal int64 // tracks WAL maximum used size
peakBitFilerPages int // track maximum transaction memory
}
// NewACIDFiler0 returns a newly created ACIDFiler0 with WAL in wal.
//
// If the WAL is zero sized then a previous clean shutdown of db is taken for
// granted and no recovery procedure is taken.
//
// If the WAL is of non zero size then it is checked for having a
// committed/fully finished transaction not yet reflected in db. If such
// transaction exists it's committed to db. If the recovery process finishes
// successfully, the WAL is truncated to zero size and fsync'ed prior to return
// from NewACIDFiler0.
func NewACIDFiler(db Filer, wal *os.File) (r *ACIDFiler0, err error) {
fi, err := wal.Stat()
if err != nil {
return
}