Commit adaa1460 authored by cznic

Release the new planner.

parent ba11fd66
......@@ -31,7 +31,7 @@ cpu: ql.test
go tool pprof --lines $< cpu.out
editor: ql.y scanner.go parser.go coerce.go
go fmt
gofmt -s -l -w *.go
go test -i
go test
......@@ -50,7 +50,7 @@ parser.go: parser.y
a=$(shell tempfile) ; \
goyacc -o /dev/null -xegen $$a $< ; \
goyacc -o $@ -xe $$a $< ; \
rm -f $a
rm -f $$a
sed -i -e 's|//line.*||' -e 's/yyEofCode/yyEOFCode/' $@
ql.test: all
......
......@@ -19,7 +19,6 @@ import (
"path/filepath"
"runtime"
"runtime/debug"
"sort"
"strconv"
"strings"
"sync"
......@@ -832,8 +831,7 @@ func Example_recordsetFields() {
panic(err)
}
ctx := NewRWCtx()
rs, _, err := db.Run(ctx, `
rs, _, err := db.Run(NewRWCtx(), `
BEGIN TRANSACTION;
CREATE TABLE t (s string, i int);
CREATE TABLE u (s string, i int);
......@@ -851,29 +849,20 @@ func Example_recordsetFields() {
;
COMMIT;
// [0]: Fields are not computable.
SELECT * FROM noTable;
// [1]: Fields are computable even when Do will fail (table noTable does not exist).
SELECT X AS Y FROM noTable;
// [2]: Both Fields and Do are okay.
SELECT t.s+u.s as a, t.i+u.i as b, "noName", "name" as Named FROM t, u;
// [3]: Fields are computable even when Do will fail (unknown column a).
SELECT DISTINCT s as S, sum(i) as I FROM (
SELECT t.s+u.s as s, t.i+u.i, 3 as i FROM t, u;
)
GROUP BY a
ORDER BY d;
GROUP BY s
ORDER BY I;
// [4]: Fields are computable even when Do will fail on missing $1.
SELECT DISTINCT * FROM (
SELECT t.s+u.s as S, t.i+u.i, 3 as I FROM t, u;
)
WHERE I < $1
ORDER BY S;
` /* , 42 */) // <-- $1 missing
`, 42)
if err != nil {
panic(err)
}
......@@ -886,27 +875,11 @@ func Example_recordsetFields() {
default:
fmt.Printf("Fields[%d]: %#v\n", i, fields)
}
if err = v.Do(
true,
func(data []interface{}) (more bool, err error) {
fmt.Printf(" Do[%d]: %#v\n", i, data)
return false, nil
},
); err != nil {
fmt.Printf(" Do[%d]: error: %s\n", i, err)
}
}
// Output:
// Fields[0]: error: table noTable does not exist
// Do[0]: error: table noTable does not exist
// Fields[1]: []string{"Y"}
// Do[1]: error: table noTable does not exist
// Fields[2]: []string{"a", "b", "", "Named"}
// Do[2]: []interface {}{"a", "b", "", "Named"}
// Fields[3]: []string{"S", "I"}
// Do[3]: error: unknown column a
// Fields[4]: []string{"S", "", "I"}
// Do[4]: error: missing $1
// Fields[0]: []string{"a", "b", "", "Named"}
// Fields[1]: []string{"S", "I"}
// Fields[2]: []string{"S", "", "I"}
}
func TestRowsAffected(t *testing.T) {
......@@ -1052,14 +1025,6 @@ func dumpDB(db *DB, tag string) (string, error) {
return buf.String(), nil
}
func dumpTables4(db *DB) {
dbg("---- db.root.head is %v", db.root.head)
for t := db.root.thead; t != nil; t = t.tnext {
dbg("\ttable @ %v: %q, next %v", t.h, t.name, t.next)
}
dbg("----")
}
func testIndices(db *DB, t *testing.T) {
ctx := NewRWCtx()
var err error
......@@ -2093,97 +2058,6 @@ func TestIssue28(t *testing.T) {
}
}
func TestIsPossiblyRewriteableCrossJoinWhereExpression(t *testing.T) {
db, err := OpenMem()
if err != nil {
t.Fatal(err)
}
table := []struct {
q string
e bool
slist string
}{
// 0
{"SELECT * FROM t WHERE !c", false, ""},
{"SELECT * FROM t WHERE !t.c && 4 < !u.c", true, "!c|4<!c"},
{"SELECT * FROM t WHERE !t.c && 4 < u.c", true, "!c|4<c"},
{"SELECT * FROM t WHERE !t.c", true, "!c"},
{"SELECT * FROM t WHERE 3 < c", false, ""},
// 5
{"SELECT * FROM t WHERE 3 < t.c", true, "3<c"},
{"SELECT * FROM t WHERE c && c", false, ""},
{"SELECT * FROM t WHERE c && u.c", false, ""},
{"SELECT * FROM t WHERE c == 42", false, ""},
{"SELECT * FROM t WHERE c > 3", false, ""},
// 10
{"SELECT * FROM t WHERE c", false, ""},
{"SELECT * FROM t WHERE false == !t.c", true, "false==!c"}, //TODO(indices) support !c relOp fixedValue (rewrite false==!c -> !c, true==c -> c, true != c -> !c, etc.)
{"SELECT * FROM t WHERE false == ^t.c", false, ""},
{"SELECT * FROM t WHERE false == t.c", true, "false==c"},
{"SELECT * FROM t WHERE t.c && 4 < u.c", true, "c|4<c"},
// 15
{"SELECT * FROM t WHERE t.c && c", false, ""},
{"SELECT * FROM t WHERE t.c && u.c && v.c > 0", true, "c|c|c>0"},
{"SELECT * FROM t WHERE t.c && u.c && v.c", true, "c|c|c"},
{"SELECT * FROM t WHERE t.c && u.c > 0 && v.c > 0", true, "c|c>0|c>0"},
{"SELECT * FROM t WHERE t.c && u.c", true, "c|c"},
// 20
{"SELECT * FROM t WHERE t.c < 3 && u.c > 2 && v.c != 42", true, "c<3|c>2|c!=42"},
{"SELECT * FROM t WHERE t.c > 0 && u.c && v.c", true, "c>0|c|c"},
{"SELECT * FROM t WHERE t.c > 0 && u.c > 0 && v.c > 0", true, "c>0|c>0|c>0"},
{"SELECT * FROM t WHERE t.c > 3 && 4 < u.c", true, "c>3|4<c"},
{"SELECT * FROM t WHERE t.c > 3 && u.c", true, "c>3|c"},
// 25
{"SELECT * FROM t WHERE t.c > 3", true, "c>3"},
{"SELECT * FROM t WHERE t.c", true, "c"},
{"SELECT * FROM t WHERE u.c == !t.c", false, ""},
{"SELECT * FROM t WHERE u.c == 42", true, "c==42"},
{"SELECT * FROM t WHERE u.c == ^t.c", false, ""},
}
for i, test := range table {
q, e, list := test.q, test.e, strings.Split(test.slist, "|")
sort.Strings(list)
l, err := Compile(q)
if err != nil {
t.Fatalf("%s\n%v", q, err)
}
rs, _, err := db.Execute(nil, l)
if err != nil {
t.Fatalf("%s\n%v", q, err)
}
r := rs[0].(recordset)
sel := r.rset.(*selectRset)
where := sel.src.(*whereRset)
g, glist := isPossiblyRewriteableCrossJoinWhereExpression(where.expr)
if g != e {
t.Fatalf("%d: %sg: %v e: %v", i, l, g, e)
}
if !g {
continue
}
a := []string{}
for _, v := range glist {
a = append(a, v.expr.String())
}
sort.Strings(a)
if g, e := len(glist), len(list); g != e {
t.Fatalf("%d: g: %v, e: %v", i, glist, list)
}
for j, g := range a {
if e := list[j]; g != e {
t.Fatalf("%d[%d]: g: %v e: %v", i, j, g, e)
}
}
}
}
func dumpFields(f []*fld) string {
a := []string{}
for _, v := range f {
......@@ -2907,6 +2781,7 @@ func testMentionedColumns(s stmt) (err error) {
commitStmt,
*dropIndexStmt,
*dropTableStmt,
*explainStmt,
rollbackStmt,
*truncateTableStmt:
// nop
......@@ -2928,9 +2803,6 @@ func testMentionedColumns(s stmt) (err error) {
for _, f := range x.flds {
mentionedColumns(f.expr)
}
if o := x.outer; o != nil {
mentionedColumns(o.on)
}
if l := x.limit; l != nil {
mentionedColumns(l.expr)
}
......@@ -2953,7 +2825,6 @@ func testMentionedColumns(s stmt) (err error) {
mentionedColumns(e)
}
default:
dbg("%T", x)
panic("internal error 056")
}
return nil
......@@ -3213,7 +3084,7 @@ func issue99Fill(db *sql.DB) (int, error) {
}
sql := "INSERT INTO Node (" + strings.Join(fieldsIssue99, ",") + ") VALUES ($1, $2, $3, $4"
for i, _ := range valuesIssue99 {
for i := range valuesIssue99 {
if i > 3 {
sql += ", $" + strconv.Itoa(i+1)
}
......@@ -3251,16 +3122,6 @@ func testIssue99(tb testing.TB, db *sql.DB) int {
return sum
}
func TestIssue99(t *testing.T) {
RegisterMemDriver()
db, err := sql.Open("ql-mem", "issue99")
if err != nil {
t.Fatal(err)
}
t.Logf("Total rows inserted %v", testIssue99(t, db))
}
var benchmarkIssue99 sync.Once
func BenchmarkIssue99(b *testing.B) {
......
......@@ -7,7 +7,6 @@ package ql
import (
"bytes"
"encoding/gob"
"log"
"math/big"
"sync"
"time"
......@@ -29,7 +28,7 @@ func init() {
"Jul 9, 2012 at 5:02am (CEST)",
time.FixedZone("XYZ", 1234),
); err != nil {
log.Panic(err)
panic(err)
}
newGobCoder()
}
......@@ -45,40 +44,40 @@ func newGobCoder() (g *gobCoder) {
g = &gobCoder{}
g.enc = gob.NewEncoder(&g.buf)
if err := g.enc.Encode(gobInitInt); err != nil {
log.Panic(err)
panic(err)
}
if err := g.enc.Encode(gobInitRat); err != nil {
log.Panic(err)
panic(err)
}
if err := g.enc.Encode(gobInitTime); err != nil {
log.Panic(err)
panic(err)
}
if err := g.enc.Encode(gobInitDuration); err != nil {
log.Panic(err)
panic(err)
}
g.dec = gob.NewDecoder(&g.buf)
i := big.NewInt(0)
if err := g.dec.Decode(i); err != nil {
log.Panic(err)
panic(err)
}
r := big.NewRat(3, 5)
if err := g.dec.Decode(r); err != nil {
log.Panic(err)
panic(err)
}
t := time.Now()
if err := g.dec.Decode(&t); err != nil {
log.Panic(err)
panic(err)
}
var d time.Duration
if err := g.dec.Decode(&d); err != nil {
log.Panic(err)
panic(err)
}
return
......@@ -118,8 +117,7 @@ func (g *gobCoder) encode(v interface{}) (b []byte, err error) {
case time.Duration:
err = g.enc.Encode(int64(x))
default:
//dbg("%T(%v)", v, v)
log.Panic("internal error 002")
panic("internal error 002")
}
b = g.buf.Bytes()
return
......@@ -151,7 +149,7 @@ func (g *gobCoder) decode(b []byte, typ int) (v interface{}, err error) {
err = g.dec.Decode(&x)
v = time.Duration(x)
default:
log.Panic("internal error 003")
panic("internal error 003")
}
return
}
......@@ -6,7 +6,6 @@ package ql
import (
"fmt"
"log"
"math/rand"
"reflect"
"strconv"
......@@ -278,7 +277,7 @@ func builtinCount(arg []interface{}, ctx map[interface{}]interface{}) (v interfa
n++
}
default:
log.Panic("internal error 067")
panic("internal error 067")
}
ctx[fn] = n
return
......@@ -557,7 +556,7 @@ func builtinID(arg []interface{}, ctx map[interface{}]interface{}) (v interface{
case int64:
return x, nil
default:
panic("internal error 072")
return nil, nil
}
}
......
......@@ -14,6 +14,18 @@
//
// Change list
//
// 2015-05-29: The execution planner was rewritten from scratch. It should use
// indices in all places where they were used before and in some additional
// situations. It is possible to investigate the plan using the newly added
// EXPLAIN statement. The QL tool is handy for such analysis. If the planner
// would have used an index but no such index exists, the plan includes hints
// in the form of copy/paste-ready CREATE INDEX statements.
//
// The planner is still quite simple and a lot of work on it lies ahead. You
// can help this process by filing an issue with a schema and a query that
// fails to use an index or indices where you believe it should. Bonus points
// for including the output of `ql 'explain <query>'`.
//
// 2015-05-09: The grammar of the CREATE INDEX statement now accepts an
// expression list instead of a single expression, which was further limited to
// just a column name or the built-in id(). As a side effect, composite
......@@ -237,18 +249,18 @@
//
// The following keywords are reserved and may not be used as identifiers.
//
// ADD COLUMN float int64 OUTER uint32
// ALTER complex128 float32 int8 RIGHT uint64
// AND complex64 float64 INTO SELECT uint8
// AS CREATE FROM JOIN SET UNIQUE
// ASC DEFAULT GROUP LEFT string UPDATE
// BETWEEN DELETE IF LIMIT TABLE VALUES
// bigint DESC IN LIKE time WHERE
// bigrat DISTINCT INDEX NOT true
// blob DROP INSERT NULL OR
// bool duration int OFFSET TRUNCATE
// BY EXISTS int16 ON uint
// byte false int32 ORDER uint16
// ADD COLUMN false int32 ORDER uint16
// ALTER complex128 float int64 OUTER uint32
// AND complex64 float32 int8 RIGHT uint64
// AS CREATE float64 INTO SELECT uint8
// ASC DEFAULT FROM JOIN SET UNIQUE
// BETWEEN DELETE GROUP LEFT string UPDATE
// bigint DESC IF LIMIT TABLE VALUES
// bigrat DISTINCT IN LIKE time WHERE
// blob DROP INDEX NOT true
// bool duration INSERT NULL OR
// BY EXISTS int OFFSET TRUNCATE
// byte EXPLAIN int16 ON uint
//
// Keywords are not case sensitive.
//
......@@ -1252,7 +1264,7 @@
// Statement = EmptyStmt | AlterTableStmt | BeginTransactionStmt | CommitStmt
// | CreateIndexStmt | CreateTableStmt | DeleteFromStmt | DropIndexStmt
// | DropTableStmt | InsertIntoStmt | RollbackStmt | SelectStmt
// | TruncateTableStmt | UpdateStmt .
// | TruncateTableStmt | UpdateStmt | ExplainStmt .
//
// StatementList = Statement { ";" Statement } .
//
......@@ -1397,9 +1409,7 @@
// CreateTableStmt = "CREATE" "TABLE" [ "IF" "NOT" "EXISTS" ] TableName
// "(" ColumnDef { "," ColumnDef } [ "," ] ")" .
//
// ColumnDef = ColumnName Type
// [ "NOT" "NULL" | Expression ]
// [ "DEFAULT" Expression ] .
// ColumnDef = ColumnName Type [ "NOT" "NULL" | Expression ] [ "DEFAULT" Expression ] .
// ColumnName = identifier .
// TableName = identifier .
//
......@@ -1591,6 +1601,68 @@
// on a per row basis. The details are discussed in the "Constraints and
// defaults" chapter below the CREATE TABLE statement documentation.
//
// Explain statement
//
// The EXPLAIN statement produces a recordset consisting of lines of text which
// describe the execution plan of a statement, if any.
//
// ExplainStmt = "EXPLAIN" Statement .
//
// For example, the QL tool treats the explain statement specially and outputs
// the joined lines:
//
// $ ql 'create table t(i int); create table u(j int)'
// $ ql 'explain select * from t, u where t.i > 42 && u.j < 314'
// ┌Compute Cartesian product of
// │ ┌Iterate all rows of table "t"
// │ └Output field names ["i"]
// │ ┌Iterate all rows of table "u"
// │ └Output field names ["j"]
// └Output field names ["t.i" "u.j"]
// ┌Filter on t.i > 42 && u.j < 314
// │Possibly useful indices
// │CREATE INDEX xt_i ON t(i);
// │CREATE INDEX xu_j ON u(j);
// └Output field names ["t.i" "u.j"]
// $ ql 'CREATE INDEX xt_i ON t(i); CREATE INDEX xu_j ON u(j);'
// $ ql 'explain select * from t, u where t.i > 42 && u.j < 314'
// ┌Compute Cartesian product of
// │ ┌Iterate all rows of table "t" using index "xt_i" where i > 42
// │ └Output field names ["i"]
// │ ┌Iterate all rows of table "u" using index "xu_j" where j < 314
// │ └Output field names ["j"]
// └Output field names ["t.i" "u.j"]
// $ ql 'explain select * from t where i > 12 and i between 10 and 20 and i < 42'
// ┌Iterate all rows of table "t" using index "xt_i" where i > 12 && <= 20
// └Output field names ["i"]
// $
//
// The explanation may aid in understanding how a statement/query would be
// executed and whether indices are used as expected, or which indices may
// improve the statement's performance. The CREATE INDEX statements above were
// copy/pasted into the terminal directly from the suggestions provided by the
// filter part of the recordset pipeline returned by the explain statement.
//
// If the statement has nothing special in its plan, the result is the original
// statement.
//
// $ ql 'explain delete from t where 42 < i'
// DELETE FROM t WHERE i > 42;
// $
//
// To get an explanation of the select statement used by an IN predicate, run
// the EXPLAIN statement on that particular select statement separately.
//
// $ ql 'explain select * from t where i in (select j from u where j > 0)'
// ┌Iterate all rows of table "t"
// └Output field names ["i"]
// ┌Filter on i IN (SELECT j FROM u WHERE j > 0;)
// └Output field names ["i"]
// $ ql 'explain select j from u where j > 0'
// ┌Iterate all rows of table "u" using index "xu_j" where j > 0
// └Output field names ["j"]
// $
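//
// A minimal sketch of reading the plan programmatically from Go, using the
// package API shown elsewhere in this commit (OpenMem, Run, Do); it assumes
// each row of the EXPLAIN recordset carries one plan line as its single
// column:
//
//	db, err := OpenMem()
//	if err != nil {
//		panic(err)
//	}
//	rs, _, err := db.Run(NewRWCtx(), `
//		BEGIN TRANSACTION;
//			CREATE TABLE t (i int);
//		COMMIT;
//		EXPLAIN SELECT * FROM t WHERE i > 42;
//	`)
//	if err != nil {
//		panic(err)
//	}
//	// Only the EXPLAIN statement contributes a recordset here; print its
//	// plan lines, one row at a time.
//	if err := rs[len(rs)-1].Do(false, func(data []interface{}) (bool, error) {
//		fmt.Println(data[0])
//		return true, nil
//	}); err != nil {
//		panic(err)
//	}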
//
// ROLLBACK
//
// The rollback statement closes the innermost transaction nesting level
......
......@@ -8,7 +8,6 @@ import (
"bytes"
"fmt"
"io"
"log"
"math"
"math/big"
"strings"
......@@ -325,7 +324,7 @@ func indexExpr(s *string, x interface{}) (i uint64, err error) {
return uint64(x), nil
default:
return 0, fmt.Errorf("non-integer string index %v", x)
return 0, fmt.Errorf("non-integer string index %v (value of type %T)", x, x)
}
}
......@@ -484,16 +483,16 @@ func ideal(v interface{}) interface{} {
}
}
func eval(v expression, execCtx *execCtx, ctx map[interface{}]interface{}, arg []interface{}) (y interface{}) {
y, err := expand1(v.eval(execCtx, ctx, arg))
func eval(v expression, execCtx *execCtx, ctx map[interface{}]interface{}) (y interface{}) {
y, err := expand1(v.eval(execCtx, ctx))
if err != nil {
panic(err) // panic ok here
}
return
}
func eval2(a, b expression, execCtx *execCtx, ctx map[interface{}]interface{}, arg []interface{}) (x, y interface{}) {
return eval(a, execCtx, ctx, arg), eval(b, execCtx, ctx, arg)
func eval2(a, b expression, execCtx *execCtx, ctx map[interface{}]interface{}) (x, y interface{}) {
return eval(a, execCtx, ctx), eval(b, execCtx, ctx)
}
func invOp2(x, y interface{}, o int) (interface{}, error) {
......@@ -1327,10 +1326,8 @@ func convert(val interface{}, typ int) (v interface{}, err error) { //NTYPE
return invConv(val, typ)
}
default:
log.Panic("internal error 006")
panic("internal error 006")
}
//dbg("%T(%v) %s", val, val, typeStr(typ))
panic("unreachable")
}
func invShiftRHS(lhs, rhs interface{}) (interface{}, error) {
......@@ -1345,6 +1342,17 @@ func overflow(v interface{}, typ int) error {
return fmt.Errorf("constant %v overflows %s", v, typeStr(typ))
}
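// typeCheck1 type-checks a single value against column c by wrapping it in a
// one-element record and delegating to typeCheck on a clone of c with index 0.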
func typeCheck1(val interface{}, c *col) (interface{}, error) {
rec := []interface{}{val}
c = c.clone()
c.index = 0
if err := typeCheck(rec, []*col{c}); err != nil {
return nil, err
}
return rec[0], nil
}
func typeCheck(rec []interface{}, cols []*col) (err error) {
for _, c := range cols {
i := c.index
......@@ -2666,12 +2674,12 @@ func collate1(a, b interface{}) int {
case chunk:
a, err := x.expand()
if err != nil {
log.Panic(err)
panic(err)
}
b, err := y.expand()
if err != nil {
log.Panic(err)
panic(err)
}
return collate1(a, b)
......@@ -2782,8 +2790,16 @@ func selector(s string) string {
func mustSelector(s string) string {
q := selector(s)
if q == s {
panic("internal error 069")
panic("internal error 053")
}
return q
}
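// qnames returns a copy of l with every element rendered as a double-quoted
// string literal (via %q).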
func qnames(l []string) []string {
r := make([]string, len(l))
for i, v := range l {
r[i] = fmt.Sprintf("%q", v)
}
return r
}
......@@ -13,7 +13,6 @@ import (
"fmt"
"io"
"io/ioutil"
"log"
"math/big"
"os"
"path/filepath"
......@@ -261,22 +260,22 @@ func infer(from []interface{}, to *[]*col) {
case chunk:
vals, err := lldb.DecodeScalars([]byte(x.b))
if err != nil {
log.Panic("err")
panic(err)
}
if len(vals) == 0 {
log.Panic("internal error 040")
panic("internal error 040")
}
i, ok := vals[0].(int64)
if !ok {
log.Panic("internal error 041")
panic("internal error 041")
}
c.typ = int(i)
case map[string]interface{}: // map of ids of a cross join
default:
log.Panic("internal error 042")
panic("internal error 042")
}
}
}
......@@ -484,7 +483,7 @@ func newFileFromOSFile(f lldb.OSFile) (fi *file, err error) {
}
if h != 1 { // root
log.Panic("internal error 043")
panic("internal error 043")
}
if h, err = s.a.Alloc(make([]byte, 8)); err != nil {
......@@ -492,7 +491,7 @@ func newFileFromOSFile(f lldb.OSFile) (fi *file, err error) {
}
if h != 2 { // id
log.Panic("internal error 044")
panic("internal error 044")
}
close, closew = false, false
......@@ -629,20 +628,20 @@ func (s *file) expandBytes(d []interface{}) (err error) {
func (s *file) collate(a, b []byte) int { //TODO w/ error return
da, err := lldb.DecodeScalars(a)
if err != nil {
log.Panic(err)
panic(err)
}
if err = s.expandBytes(da); err != nil {
log.Panic(err)
panic(err)
}
db, err := lldb.DecodeScalars(b)
if err != nil {
log.Panic(err)
panic(err)
}
if err = s.expandBytes(db); err != nil {
log.Panic(err)
panic(err)
}
//dbg("da: %v, db: %v", da, db)
......@@ -834,7 +833,7 @@ func (s *file) Read(dst []interface{}, h int64, cols ...*col) (data []interface{
return nil, fmt.Errorf("(file-006) corrupted DB: non nil chunk type is not []byte")
}
default:
log.Panic("internal error 045")
panic("internal error 045")
}
}
......
......@@ -228,7 +228,7 @@ func TestSchema(t *testing.T) {
s, err := Compile(test.s)
if err != nil {
panic("internal error 070")
panic("internal error 055")
}
if g, e := l.String(), s.String(); g != e {
......
......@@ -10,7 +10,6 @@ import (
"bytes"
"fmt"
"io"
"log"
"math/big"
"time"
)
......@@ -258,7 +257,7 @@ func newMemStorage() (s *mem, err error) {
h, err := s.Create()
if h != 1 {
log.Panic("internal error 048")
panic("internal error 048")
}
if err = s.Commit(); err != nil {
......@@ -424,7 +423,7 @@ func (s *mem) clone(data ...interface{}) []interface{} {
case map[string]interface{}: // map of ids of a cross join
r[i] = x
default:
log.Panic("internal error 050")
panic("internal error 050")
}
}
return r
......@@ -536,7 +535,7 @@ func (s *mem) Rollback() (err error) {
x, v := data[0].(*memIndex), data[1].(memIndex)
*x = v
default:
log.Panic("internal error 051")
panic("internal error 051")
}
}
......
......@@ -82,6 +82,7 @@ DeleteFromStmt = "DELETE" "FROM" TableName [ WhereClause ] .
DropIndexStmt = "DROP" "INDEX" [ "IF" "EXISTS" ] IndexName .
DropTableStmt = "DROP" "TABLE" [ "IF" "EXISTS" ] TableName .
EmptyStmt = .
ExplainStmt = "EXPLAIN" Statement .
Expression = Term {
( oror | "OR" ) Term
} .
......@@ -179,7 +180,8 @@ Statement = EmptyStmt
| RollbackStmt
| SelectStmt
| TruncateTableStmt
| UpdateStmt .
| UpdateStmt
| ExplainStmt .
StatementList = Statement { ";" Statement } .