diff --git a/data/test/tabletserver/exec_cases.txt b/data/test/tabletserver/exec_cases.txt
index d6b63f4b73e..51e873266d4 100644
--- a/data/test/tabletserver/exec_cases.txt
+++ b/data/test/tabletserver/exec_cases.txt
@@ -2,7 +2,6 @@
"select * from a union select * from b"
{
"PlanID": "PASS_SELECT",
- "Reason": "SELECT",
"FieldQuery": "select * from a where 1 != 1 union select * from b where 1 != 1",
"FullQuery": "select * from a union select * from b"
}
@@ -11,7 +10,6 @@
"select distinct * from a"
{
"PlanID": "PASS_SELECT",
- "Reason": "SELECT",
"TableName": "a",
"FieldQuery": "select * from a where 1 != 1",
"FullQuery": "select distinct * from a limit :#maxLimit"
@@ -21,7 +19,6 @@
"select * from a group by b"
{
"PlanID": "PASS_SELECT",
- "Reason": "SELECT",
"TableName": "a",
"FieldQuery": "select * from a where 1 != 1",
"FullQuery": "select * from a group by b limit :#maxLimit"
@@ -31,7 +28,6 @@
"select * from a having b=1"
{
"PlanID": "PASS_SELECT",
- "Reason": "SELECT",
"TableName": "a",
"FieldQuery": "select * from a where 1 != 1",
"FullQuery": "select * from a having b = 1 limit :#maxLimit"
@@ -41,18 +37,15 @@
"select * from a limit 5"
{
"PlanID": "PASS_SELECT",
- "Reason": "WHERE",
"TableName": "a",
"FieldQuery": "select * from a where 1 != 1",
- "FullQuery": "select * from a limit 5",
- "ColumnNumbers": [0, 1, 2, 3, 4]
+ "FullQuery": "select * from a limit 5"
}
# cross-db
"select * from a.b"
{
"PlanID": "PASS_SELECT",
- "Reason": "TABLE",
"FieldQuery": "select * from a.b where 1 != 1",
"FullQuery": "select * from a.b limit :#maxLimit"
}
@@ -61,7 +54,6 @@
"select * from a,b"
{
"PlanID": "PASS_SELECT",
- "Reason": "TABLE",
"FieldQuery": "select * from a, b where 1 != 1",
"FullQuery": "select * from a, b limit :#maxLimit"
}
@@ -70,7 +62,6 @@
"select * from a join b"
{
"PlanID": "PASS_SELECT",
- "Reason": "TABLE",
"FieldQuery": "select * from a join b where 1 != 1",
"FullQuery": "select * from a join b limit :#maxLimit"
}
@@ -79,26 +70,14 @@
"select * from a right join b on c = d"
{
"PlanID": "PASS_SELECT",
- "Reason": "TABLE",
"FieldQuery": "select * from a right join b on 1 != 1 where 1 != 1",
"FullQuery": "select * from a right join b on c = d limit :#maxLimit"
}
-# table not cached
-"select * from b"
-{
- "PlanID": "PASS_SELECT",
- "Reason": "NOCACHE",
- "TableName": "b",
- "FieldQuery": "select * from b where 1 != 1",
- "FullQuery": "select * from b limit :#maxLimit"
-}
-
# Parenthesized table
"select * from (b)"
{
"PlanID": "PASS_SELECT",
- "Reason": "TABLE",
"FieldQuery": "select * from (b) where 1 != 1",
"FullQuery": "select * from (b) limit :#maxLimit"
}
@@ -107,90 +86,50 @@
"select :bv from a"
{
"PlanID": "PASS_SELECT",
- "Reason": "SELECT_LIST",
"TableName": "a",
"FullQuery": "select :bv from a limit :#maxLimit"
}
-# complex select list
-"select eid+1 from a"
-{
- "PlanID": "PASS_SELECT",
- "Reason": "SELECT_LIST",
- "TableName": "a",
- "FieldQuery": "select eid + 1 from a where 1 != 1",
- "FullQuery": "select eid + 1 from a limit :#maxLimit"
-}
-
-# case in select list
-"select case when eid = 1 then 1 end from a"
-{
- "PlanID": "PASS_SELECT",
- "Reason": "SELECT_LIST",
- "TableName": "a",
- "FieldQuery": "select case when eid = 1 then 1 end from a where 1 != 1",
- "FullQuery": "select case when eid = 1 then 1 end from a limit :#maxLimit"
-}
-
# simple
"select eid from a"
{
"PlanID": "PASS_SELECT",
- "Reason": "WHERE",
"TableName": "a",
"FieldQuery": "select eid from a where 1 != 1",
- "FullQuery": "select eid from a limit :#maxLimit",
- "ColumnNumbers": [0]
+ "FullQuery": "select eid from a limit :#maxLimit"
}
# as
"select eid as foo from a"
{
"PlanID": "PASS_SELECT",
- "Reason": "WHERE",
"TableName": "a",
"FieldQuery": "select eid as foo from a where 1 != 1",
- "FullQuery": "select eid as foo from a limit :#maxLimit",
- "ColumnNumbers": [0]
+ "FullQuery": "select eid as foo from a limit :#maxLimit"
}
# *
"select * from a"
{
"PlanID": "PASS_SELECT",
- "Reason": "WHERE",
"TableName": "a",
"FieldQuery": "select * from a where 1 != 1",
- "FullQuery": "select * from a limit :#maxLimit",
- "ColumnNumbers": [0, 1, 2, 3, 4]
+ "FullQuery": "select * from a limit :#maxLimit"
}
# c.eid
"select c.eid from a as c"
{
"PlanID": "PASS_SELECT",
- "Reason": "WHERE",
"TableName": "a",
"FieldQuery": "select c.eid from a as c where 1 != 1",
- "FullQuery": "select c.eid from a as c limit :#maxLimit",
- "ColumnNumbers": [0]
-}
-
-# (eid)
-"select (eid) from a"
-{
- "PlanID": "PASS_SELECT",
- "Reason": "SELECT_LIST",
- "TableName": "a",
- "FieldQuery": "select (eid) from a where 1 != 1",
- "FullQuery": "select (eid) from a limit :#maxLimit"
+ "FullQuery": "select c.eid from a as c limit :#maxLimit"
}
# for update
"select eid from a for update"
{
- "PlanID": "PASS_SELECT",
- "Reason": "LOCK",
+ "PlanID": "SELECT_LOCK",
"TableName": "a",
"FieldQuery": "select eid from a where 1 != 1",
"FullQuery": "select eid from a limit :#maxLimit for update"
@@ -199,505 +138,12 @@
# lock in share mode
"select eid from a lock in share mode"
{
- "PlanID": "PASS_SELECT",
- "Reason": "LOCK",
+ "PlanID": "SELECT_LOCK",
"TableName": "a",
"FieldQuery": "select eid from a where 1 != 1",
"FullQuery": "select eid from a limit :#maxLimit lock in share mode"
}
-# composite pk supplied values
-"select * from a where eid = 1 and id in (1, 2)"
-{
- "PlanID": "PK_IN",
- "TableName": "a",
- "FieldQuery": "select * from a where 1 != 1",
- "FullQuery": "select * from a where eid = 1 and id in (1, 2) limit :#maxLimit",
- "OuterQuery": "select eid, id, name, foo, CamelCase from a where :#pk",
- "IndexUsed": "PRIMARY",
- "ColumnNumbers": [0, 1, 2, 3, 4],
- "PKValues": [1,[1,2]]
-}
-
-# positional arguments
-"select * from a where eid = ? and id in (?, ?)"
-{
- "PlanID": "PK_IN",
- "TableName": "a",
- "FieldQuery": "select * from a where 1 != 1",
- "FullQuery": "select * from a where eid = :v1 and id in (:v2, :v3) limit :#maxLimit",
- "OuterQuery": "select eid, id, name, foo, CamelCase from a where :#pk",
- "IndexUsed": "PRIMARY",
- "ColumnNumbers": [0, 1, 2, 3, 4],
- "PKValues": [":v1",[":v2",":v3"]]
-}
-
-# composite pk subquery
-"select * from a where name = 'foo'"
-{
- "PlanID": "SELECT_SUBQUERY",
- "TableName": "a",
- "FieldQuery": "select * from a where 1 != 1",
- "FullQuery": "select * from a where name = 'foo' limit :#maxLimit",
- "OuterQuery": "select eid, id, name, foo, CamelCase from a where :#pk",
- "Subquery": "select eid, id from a use index (b_name) where name = 'foo' limit :#maxLimit",
- "IndexUsed": "b_name",
- "ColumnNumbers": [0, 1, 2, 3, 4]
-}
-
-# covering index
-"select eid, name, id from a where name = 'foo'"
-{
- "PlanID": "PASS_SELECT",
- "Reason": "COVERING",
- "TableName": "a",
- "FieldQuery": "select eid, name, id from a where 1 != 1",
- "FullQuery": "select eid, name, id from a where name = 'foo' limit :#maxLimit",
- "IndexUsed": "b_name",
- "ColumnNumbers": [0, 2, 1]
-}
-
-# subquery
-"select * from d where id = 1"
-{
- "PlanID": "SELECT_SUBQUERY",
- "TableName": "d",
- "FieldQuery": "select * from d where 1 != 1",
- "FullQuery": "select * from d where id = 1 limit :#maxLimit",
- "OuterQuery": "select name, id, foo, bar from d where :#pk",
- "Subquery": "select name from d use index (d_id) where id = 1 limit :#maxLimit",
- "IndexUsed": "d_id",
- "ColumnNumbers": [0, 1, 2, 3]
-}
-
-# subquery with limit
-"select * from d where id = 1 limit 1"
-{
- "PlanID": "SELECT_SUBQUERY",
- "TableName": "d",
- "FieldQuery": "select * from d where 1 != 1",
- "FullQuery": "select * from d where id = 1 limit 1",
- "OuterQuery": "select name, id, foo, bar from d where :#pk",
- "Subquery": "select name from d use index (d_id) where id = 1 limit 1",
- "IndexUsed": "d_id",
- "ColumnNumbers": [0, 1, 2, 3]
-}
-
-# complex where (expression)
-"select * from a where eid+1 = 1"
-{
- "PlanID": "PASS_SELECT",
- "Reason": "WHERE",
- "TableName": "a",
- "FieldQuery": "select * from a where 1 != 1",
- "FullQuery": "select * from a where eid + 1 = 1 limit :#maxLimit",
- "ColumnNumbers": [0, 1, 2, 3, 4]
-}
-
-# complex where (non-value operand)
-"select * from a where eid = id"
-{
- "PlanID": "PASS_SELECT",
- "Reason": "WHERE",
- "TableName": "a",
- "FieldQuery": "select * from a where 1 != 1",
- "FullQuery": "select * from a where eid = id limit :#maxLimit",
- "ColumnNumbers": [0, 1, 2, 3, 4]
-}
-
-# inequality on pk columns
-"select * from d where name between 'foo' and 'bar'"
-{
- "PlanID": "PASS_SELECT",
- "Reason": "PKINDEX",
- "TableName": "d",
- "FieldQuery": "select * from d where 1 != 1",
- "FullQuery": "select * from d where name between 'foo' and 'bar' limit :#maxLimit",
- "IndexUsed": "PRIMARY",
- "ColumnNumbers": [0, 1, 2, 3]
-}
-
-# (condition)
-"select * from a where (eid=1) and (id=2)"
-{
- "PlanID": "PK_IN",
- "TableName": "a",
- "FieldQuery": "select * from a where 1 != 1",
- "FullQuery": "select * from a where (eid = 1) and (id = 2) limit :#maxLimit",
- "OuterQuery": "select eid, id, name, foo, CamelCase from a where :#pk",
- "IndexUsed": "PRIMARY",
- "ColumnNumbers": [0, 1, 2, 3, 4],
- "PKValues": [1, 2]
-}
-
-# pk match
-"select * from a where eid=1 and id=1"
-{
- "PlanID": "PK_IN",
- "TableName": "a",
- "FieldQuery": "select * from a where 1 != 1",
- "FullQuery": "select * from a where eid = 1 and id = 1 limit :#maxLimit",
- "OuterQuery": "select eid, id, name, foo, CamelCase from a where :#pk",
- "IndexUsed": "PRIMARY",
- "ColumnNumbers": [0, 1, 2, 3, 4],
- "PKValues": [1, 1]
-}
-
-# disjoint index match
-"select * from d where bar='foo' and id=1"
-{
- "PlanID": "PASS_SELECT",
- "Reason": "NOINDEX_MATCH",
- "TableName": "d",
- "FieldQuery": "select * from d where 1 != 1",
- "FullQuery": "select * from d where bar = 'foo' and id = 1 limit :#maxLimit",
- "ColumnNumbers": [0, 1, 2, 3]
-}
-
-# string pk match
-"select * from d where name='foo'"
-{
- "PlanID": "PK_IN",
- "TableName": "d",
- "FieldQuery": "select * from d where 1 != 1",
- "FullQuery": "select * from d where name = 'foo' limit :#maxLimit",
- "OuterQuery": "select name, id, foo, bar from d where :#pk",
- "IndexUsed": "PRIMARY",
- "ColumnNumbers": [0, 1, 2, 3],
- "PKValues": ["foo"]
-}
-
-# string pk match with limit
-"select * from d where name='foo' limit 1"
-{
- "PlanID": "PK_IN",
- "TableName": "d",
- "FieldQuery": "select * from d where 1 != 1",
- "FullQuery": "select * from d where name = 'foo' limit 1",
- "OuterQuery": "select name, id, foo, bar from d where :#pk",
- "IndexUsed": "PRIMARY",
- "ColumnNumbers": [0, 1, 2, 3],
- "PKValues": ["foo"],
- "Limit": 1
-}
-
-# string pk match with limit 0
-"select * from d where name='foo' limit 0"
-{
- "PlanID": "PK_IN",
- "TableName": "d",
- "FieldQuery": "select * from d where 1 != 1",
- "FullQuery": "select * from d where name = 'foo' limit 0",
- "OuterQuery": "select name, id, foo, bar from d where :#pk",
- "IndexUsed": "PRIMARY",
- "ColumnNumbers": [0, 1, 2, 3],
- "PKValues": ["foo"],
- "Limit": 0
-}
-
-# string pk match with limit bindvar
-"select * from d where name='foo' limit :a"
-{
- "PlanID": "PK_IN",
- "TableName": "d",
- "FieldQuery": "select * from d where 1 != 1",
- "FullQuery": "select * from d where name = 'foo' limit :a",
- "OuterQuery": "select name, id, foo, bar from d where :#pk",
- "IndexUsed": "PRIMARY",
- "ColumnNumbers": [0, 1, 2, 3],
- "PKValues": ["foo"],
- "Limit": ":a"
-}
-
-# string pk match with offset limit
-"select * from d where name='foo' limit 1, 1"
-{
- "PlanID": "PASS_SELECT",
- "Reason": "LIMIT",
- "TableName": "d",
- "FieldQuery": "select * from d where 1 != 1",
- "FullQuery": "select * from d where name = 'foo' limit 1, 1",
- "IndexUsed": "PRIMARY",
- "ColumnNumbers": [0, 1, 2, 3]
-}
-
-# string pk match with invalid limit
-"select * from d where name='foo' limit (1)"
-"unexpected node for rowcount: [[49]]"
-
-# string pk match with negative limit
-"select * from d where name='foo' limit -1"
-"negative limit: -1"
-
-# string pk match with negative offset
-"select * from d where name='foo' limit -1, 1"
-"negative offset: -1"
-
-# reversed conditions with and clause
-"select * from d where 'foo'=name and eid=1"
-{
- "PlanID": "PASS_SELECT",
- "Reason": "WHERE",
- "TableName": "d",
- "FieldQuery": "select * from d where 1 != 1",
- "FullQuery": "select * from d where 'foo' = name and eid = 1 limit :#maxLimit",
- "ColumnNumbers": [0, 1, 2, 3]
-}
-
-# pk IN
-"select * from d where name in ('foo', 'bar')"
-{
- "PlanID": "PK_IN",
- "TableName": "d",
- "FieldQuery": "select * from d where 1 != 1",
- "FullQuery": "select * from d where name in ('foo', 'bar') limit :#maxLimit",
- "OuterQuery": "select name, id, foo, bar from d where :#pk",
- "IndexUsed": "PRIMARY",
- "ColumnNumbers": [0, 1, 2, 3],
- "PKValues": [["foo", "bar"]]
-}
-
-# pk IN parameter list
-"select * from d where name in (:a,:b)"
-{
- "PlanID": "PK_IN",
- "TableName": "d",
- "FieldQuery": "select * from d where 1 != 1",
- "FullQuery": "select * from d where name in (:a, :b) limit :#maxLimit",
- "OuterQuery": "select name, id, foo, bar from d where :#pk",
- "IndexUsed": "PRIMARY",
- "ColumnNumbers": [0, 1, 2, 3],
- "PKValues": [[":a", ":b"]]
-}
-
-# pk IN, single value list
-"select * from d where name in ('foo')"
-{
- "PlanID": "PK_IN",
- "TableName": "d",
- "FieldQuery": "select * from d where 1 != 1",
- "FullQuery": "select * from d where name in ('foo') limit :#maxLimit",
- "OuterQuery": "select name, id, foo, bar from d where :#pk",
- "IndexUsed": "PRIMARY",
- "ColumnNumbers": [0, 1, 2, 3],
- "PKValues": [["foo"]]
-}
-
-# pk IN, single value parameter list
-"select * from d where name in (:a)"
-{
- "PlanID": "PK_IN",
- "TableName": "d",
- "FieldQuery": "select * from d where 1 != 1",
- "FullQuery": "select * from d where name in (:a) limit :#maxLimit",
- "OuterQuery": "select name, id, foo, bar from d where :#pk",
- "IndexUsed": "PRIMARY",
- "ColumnNumbers": [0, 1, 2, 3],
- "PKValues": [[":a"]]
-}
-
-# pk IN, limit clause
-"select * from d where name in (:a) limit 1"
-{
- "PlanID": "PK_IN",
- "TableName": "d",
- "FieldQuery": "select * from d where 1 != 1",
- "FullQuery": "select * from d where name in (:a) limit 1",
- "OuterQuery": "select name, id, foo, bar from d where :#pk",
- "IndexUsed": "PRIMARY",
- "ColumnNumbers": [0, 1, 2, 3],
- "PKValues": [[":a"]],
- "Limit": 1
-}
-
-# double pk IN
-"select * from a where eid in (1) and id in (1, 2)"
-{
- "PlanID": "PASS_SELECT",
- "Reason": "WHERE",
- "TableName": "a",
- "FieldQuery": "select * from a where 1 != 1",
- "FullQuery": "select * from a where eid in (1) and id in (1, 2) limit :#maxLimit",
- "ColumnNumbers": [0, 1, 2, 3, 4]
-}
-
-# double pk IN 2
-"select * from a where eid in (1, 2) and id in (1, 2)"
-{
- "PlanID": "PASS_SELECT",
- "Reason": "WHERE",
- "TableName": "a",
- "FieldQuery": "select * from a where 1 != 1",
- "FullQuery": "select * from a where eid in (1, 2) and id in (1, 2) limit :#maxLimit",
- "ColumnNumbers": [0, 1, 2, 3, 4]
-}
-
-# pk as tuple
-"select * from a where (eid, id) in ((1, 1), (2, 2))"
-{
- "PlanID": "PASS_SELECT",
- "Reason": "WHERE",
- "TableName": "a",
- "FieldQuery": "select * from a where 1 != 1",
- "FullQuery": "select * from a where (eid, id) in ((1, 1), (2, 2)) limit :#maxLimit",
- "ColumnNumbers": [0, 1, 2, 3, 4]
-}
-
-# no index match
-"select * from d where foo='bar'"
-{
- "PlanID": "PASS_SELECT",
- "Reason": "NOINDEX_MATCH",
- "TableName": "d",
- "FieldQuery": "select * from d where 1 != 1",
- "FullQuery": "select * from d where foo = 'bar' limit :#maxLimit",
- "ColumnNumbers": [0, 1, 2, 3]
-}
-
-# table alias
-"select * from d as c where c.name='foo'"
-{
- "PlanID": "PK_IN",
- "TableName": "d",
- "FieldQuery": "select * from d as c where 1 != 1",
- "FullQuery": "select * from d as c where c.name = 'foo' limit :#maxLimit",
- "OuterQuery": "select name, id, foo, bar from d as c where :#pk",
- "IndexUsed": "PRIMARY",
- "ColumnNumbers": [0, 1, 2, 3],
- "PKValues": ["foo"]
-}
-
-# non-pk inequality match
-"select * from d where id<0"
-{
- "PlanID": "SELECT_SUBQUERY",
- "TableName": "d",
- "FieldQuery": "select * from d where 1 != 1",
- "FullQuery": "select * from d where id \u003c 0 limit :#maxLimit",
- "OuterQuery": "select name, id, foo, bar from d where :#pk",
- "Subquery": "select name from d use index (d_id) where id \u003c 0 limit :#maxLimit",
- "IndexUsed": "d_id",
- "ColumnNumbers": [0, 1, 2, 3]
-}
-
-# non-pk IN non-value operand
-"select * from d where name in ('foo', id)"
-{
- "PlanID": "PASS_SELECT",
- "Reason": "WHERE",
- "TableName": "d",
- "FieldQuery": "select * from d where 1 != 1",
- "FullQuery": "select * from d where name in ('foo', id) limit :#maxLimit",
- "ColumnNumbers": [0, 1, 2, 3]
-}
-
-# non-pk between
-"select * from d where id between 1 and 2"
-{
- "PlanID": "SELECT_SUBQUERY",
- "TableName": "d",
- "FieldQuery": "select * from d where 1 != 1",
- "FullQuery": "select * from d where id between 1 and 2 limit :#maxLimit",
- "OuterQuery": "select name, id, foo, bar from d where :#pk",
- "Subquery": "select name from d use index (d_id) where id between 1 and 2 limit :#maxLimit",
- "IndexUsed": "d_id",
- "ColumnNumbers": [0, 1, 2, 3]
-}
-
-# non-pk not between
-"select * from d where id not between 1 and 2"
-{
- "PlanID": "PASS_SELECT",
- "Reason": "WHERE",
- "TableName": "d",
- "FieldQuery": "select * from d where 1 != 1",
- "FullQuery": "select * from d where id not between 1 and 2 limit :#maxLimit",
- "ColumnNumbers": [0, 1, 2, 3]
-}
-
-# non-column between
-"select * from d where 1 between 1 and 2"
-{
- "PlanID": "PASS_SELECT",
- "Reason": "WHERE",
- "TableName": "d",
- "FieldQuery": "select * from d where 1 != 1",
- "FullQuery": "select * from d where 1 between 1 and 2 limit :#maxLimit",
- "ColumnNumbers": [0, 1, 2, 3]
-}
-
-# complex predicate
-"select * from d where name is not null"
-{
- "PlanID": "PASS_SELECT",
- "Reason": "WHERE",
- "TableName": "d",
- "FieldQuery": "select * from d where 1 != 1",
- "FullQuery": "select * from d where name is not null limit :#maxLimit",
- "ColumnNumbers": [0, 1, 2, 3]
-}
-
-# order by
-"select * from a where eid=1 and id=1 order by name"
-{
- "PlanID": "PASS_SELECT",
- "Reason": "ORDER",
- "TableName": "a",
- "FieldQuery": "select * from a where 1 != 1",
- "FullQuery": "select * from a where eid = 1 and id = 1 order by name asc limit :#maxLimit",
- "ColumnNumbers": [0, 1, 2, 3, 4]
-}
-
-# cardinality override
-"select * from d where bar = 'foo'"
-{
- "PlanID": "SELECT_SUBQUERY",
- "TableName": "d",
- "FieldQuery": "select * from d where 1 != 1",
- "FullQuery": "select * from d where bar = 'foo' limit :#maxLimit",
- "OuterQuery": "select name, id, foo, bar from d where :#pk",
- "Subquery": "select name from d use index (d_bar) where bar = 'foo' limit :#maxLimit",
- "IndexUsed": "d_bar",
- "ColumnNumbers": [0, 1, 2, 3]
-}
-
-# index override (use)
-"select * from d use index(d_bar_never) where bar = 'foo'"
-{
- "PlanID": "PASS_SELECT",
- "Reason": "HAS_HINTS",
- "TableName": "d",
- "FieldQuery": "select * from d use index (d_bar_never) where 1 != 1",
- "FullQuery": "select * from d use index (d_bar_never) where bar = 'foo' limit :#maxLimit",
- "ColumnNumbers": [0, 1, 2, 3]
-}
-
-# index override (force)
-"select * from d force index(d_bar_never) where bar = 'foo'"
-{
- "PlanID": "PASS_SELECT",
- "Reason": "HAS_HINTS",
- "TableName": "d",
- "FieldQuery": "select * from d force index (d_bar_never) where 1 != 1",
- "FullQuery": "select * from d force index (d_bar_never) where bar = 'foo' limit :#maxLimit",
- "ColumnNumbers": [0, 1, 2, 3]
-}
-
-# camel case preserved
-"select Name, id, FOO, bar from d"
-{
- "PlanID": "PASS_SELECT",
- "Reason": "WHERE",
- "TableName": "d",
- "FieldQuery": "select Name, id, FOO, bar from d where 1 != 1",
- "FullQuery": "select Name, id, FOO, bar from d limit :#maxLimit",
- "ColumnNumbers": [0, 1, 2, 3]
-}
-
-# column not found
-"select missing from a"
-"column missing not found in table a"
-
# insert cross-db
"insert into b.a (eid, id) values (1, :a)"
{
@@ -706,18 +152,8 @@
"FullQuery": "insert into b.a(eid, id) values (1, :a)"
}
-# insert with qualified column names
-"insert into a (a.eid, id) values (1, 2)"
-{
- "PlanID": "INSERT_PK",
- "TableName": "a",
- "FullQuery": "insert into a(a.eid, id) values (1, 2)",
- "OuterQuery": "insert into a(a.eid, id) values (1, 2)",
- "PKValues": [1, 2]
-}
-
# insert sub-select
-"insert into a (a.eid, id) values (select * from b)"
+"insert into a (eid, id) values (select * from b)"
"row subquery not supported for inserts"
# insert with bind value
@@ -920,17 +356,6 @@
"update b set eid=1.2"
"type mismatch: strconv.ParseUint: parsing "1.2": invalid syntax"
-# pk changed as qualified column name
-"update b set a.eid=1"
-{
- "PlanID": "DML_SUBQUERY",
- "TableName": "b",
- "FullQuery": "update b set a.eid = 1",
- "OuterQuery": "update b set a.eid = 1 where :#pk",
- "Subquery": "select eid, id from b limit :#maxLimit for update",
- "SecondaryPKValues": [1, null]
-}
-
# complex pk change
"update b set eid=foo()"
{
@@ -970,16 +395,6 @@
"PKValues": [1, 1]
}
-# update with qualified column name
-"update a set a.name='foo' where eid=1 and id=1"
-{
- "PlanID": "DML_PK",
- "TableName": "a",
- "FullQuery": "update a set a.name = 'foo' where eid = 1 and id = 1",
- "OuterQuery": "update a set a.name = 'foo' where :#pk",
- "PKValues": [1, 1]
-}
-
# partial pk
"update a set name='foo' where eid=1"
{
@@ -990,6 +405,16 @@
"Subquery": "select eid, id from a where eid = 1 limit :#maxLimit for update"
}
+# bad pk
+"update a set name='foo' where eid=1.0 and id=1"
+{
+ "PlanID": "DML_SUBQUERY",
+ "TableName": "a",
+ "FullQuery": "update a set name = 'foo' where eid = 1.0 and id = 1",
+ "OuterQuery": "update a set name = 'foo' where :#pk",
+ "Subquery": "select eid, id from a where eid = 1.0 and id = 1 limit :#maxLimit for update"
+}
+
# partial pk with limit
"update a set name='foo' where eid=1 limit 10"
{
@@ -1019,6 +444,56 @@
"FullQuery": "update c set eid = 1"
}
+# complex expression in where
+"update a set name='foo' where eid+1=1 and id=1"
+{
+ "PlanID":"DML_SUBQUERY",
+ "TableName":"a",
+ "FullQuery":"update a set name = 'foo' where eid + 1 = 1 and id = 1",
+ "OuterQuery":"update a set name = 'foo' where :#pk",
+ "Subquery":"select eid, id from a where eid + 1 = 1 and id = 1 limit :#maxLimit for update"
+}
+
+# parenthesized expressions in where
+"update a set name='foo' where (eid=1) and id=1"
+{
+ "PlanID": "DML_PK",
+ "TableName": "a",
+ "FullQuery": "update a set name = 'foo' where (eid = 1) and id = 1",
+ "OuterQuery": "update a set name = 'foo' where :#pk",
+ "PKValues": [1, 1]
+}
+
+# in clause expression in where
+"update a set name='foo' where eid in (1, 2) and id=1"
+{
+ "PlanID":"DML_PK",
+ "TableName":"a",
+ "FullQuery":"update a set name = 'foo' where eid in (1, 2) and id = 1",
+ "OuterQuery":"update a set name = 'foo' where :#pk",
+ "PKValues":[[1,2],1]
+}
+
+# double in clause
+"update a set name='foo' where eid in (1, 2) and id in (1, 2)"
+{
+ "PlanID":"DML_SUBQUERY",
+ "TableName":"a",
+ "FullQuery":"update a set name = 'foo' where eid in (1, 2) and id in (1, 2)",
+ "OuterQuery":"update a set name = 'foo' where :#pk",
+ "Subquery":"select eid, id from a where eid in (1, 2) and id in (1, 2) limit :#maxLimit for update"
+}
+
+# double use of pk
+"update a set name='foo' where eid=1 and eid=2"
+{
+ "PlanID":"DML_SUBQUERY",
+ "TableName":"a",
+ "FullQuery":"update a set name = 'foo' where eid = 1 and eid = 2",
+ "OuterQuery":"update a set name = 'foo' where :#pk",
+ "Subquery":"select eid, id from a where eid = 1 and eid = 2 limit :#maxLimit for update"
+}
+
# delete cross-db
"delete from b.a where eid=1 and id=1"
{
@@ -1067,6 +542,16 @@
"Subquery": "select eid, id from a where eid = 1 limit :#maxLimit for update"
}
+# bad pk value delete
+"delete from a where eid=1.0 and id=1"
+{
+ "PlanID": "DML_SUBQUERY",
+ "TableName": "a",
+ "FullQuery": "delete from a where eid = 1.0 and id = 1",
+ "OuterQuery": "delete from a where :#pk",
+ "Subquery": "select eid, id from a where eid = 1.0 and id = 1 limit :#maxLimit for update"
+}
+
# non-pk
"delete from a where eid=1 and name='foo'"
{
@@ -1086,6 +571,56 @@
"FullQuery": "delete from c"
}
+# delete complex expression in where
+"delete from a where eid+1=1 and id=1"
+{
+ "PlanID":"DML_SUBQUERY",
+ "TableName":"a",
+ "FullQuery":"delete from a where eid + 1 = 1 and id = 1",
+ "OuterQuery":"delete from a where :#pk",
+ "Subquery":"select eid, id from a where eid + 1 = 1 and id = 1 limit :#maxLimit for update"
+}
+
+# parenthesized expressions in where
+"delete from a where (eid=1) and id=1"
+{
+ "PlanID": "DML_PK",
+ "TableName": "a",
+ "FullQuery": "delete from a where (eid = 1) and id = 1",
+ "OuterQuery": "delete from a where :#pk",
+ "PKValues": [1, 1]
+}
+
+# delete in clause expression in where
+"delete from a where eid in (1, 2) and id=1"
+{
+ "PlanID":"DML_PK",
+ "TableName":"a",
+ "FullQuery":"delete from a where eid in (1, 2) and id = 1",
+ "OuterQuery":"delete from a where :#pk",
+ "PKValues":[[1,2],1]
+}
+
+# delete double in clause
+"delete from a where eid in (1, 2) and id in (1, 2)"
+{
+ "PlanID":"DML_SUBQUERY",
+ "TableName":"a",
+ "FullQuery":"delete from a where eid in (1, 2) and id in (1, 2)",
+ "OuterQuery":"delete from a where :#pk",
+ "Subquery":"select eid, id from a where eid in (1, 2) and id in (1, 2) limit :#maxLimit for update"
+}
+
+# delete double use of pk
+"delete from a where eid=1 and eid=2"
+{
+ "PlanID":"DML_SUBQUERY",
+ "TableName":"a",
+ "FullQuery":"delete from a where eid = 1 and eid = 2",
+ "OuterQuery":"delete from a where :#pk",
+ "Subquery":"select eid, id from a where eid = 1 and eid = 2 limit :#maxLimit for update"
+}
+
# sequence
"select next value from seq"
{
@@ -1105,26 +640,21 @@
"set a=1"
{
"PlanID": "SET",
- "FullQuery": "set a = 1",
- "SetKey": "a",
- "SetValue": 1
+ "FullQuery": "set a = 1"
}
# float
"set a=1.2"
{
"PlanID": "SET",
- "FullQuery": "set a = 1.2",
- "SetKey": "a",
- "SetValue": 1.2
+ "FullQuery": "set a = 1.2"
}
# string
"set a='b'"
{
"PlanID": "SET",
- "FullQuery": "set a = 'b'",
- "SetKey": "a"
+ "FullQuery": "set a = 'b'"
}
# multi
@@ -1193,10 +723,26 @@
"PlanID": "OTHER"
}
-# table not found
+# table not found select
"select * from aaaa"
"table aaaa not found in schema"
+# table not found update
+"update aaaa set a=1"
+"table aaaa not found in schema"
+
+# table not found delete
+"delete from aaaa"
+"table aaaa not found in schema"
+
+# table not found insert
+"insert into aaaa values(1)"
+"table aaaa not found in schema"
+
+# column not found insert with subquery
+"insert into a(missing) select * from b"
+"column missing not found in table a"
+
# syntax error
"syntax error"
"syntax error at position 7 near 'syntax'"
diff --git a/data/test/tabletserver/schema_test.json b/data/test/tabletserver/schema_test.json
index 476c654f31f..eeb5dcbe503 100644
--- a/data/test/tabletserver/schema_test.json
+++ b/data/test/tabletserver/schema_test.json
@@ -100,7 +100,7 @@
0,
1
],
- "Type": 1
+ "Type": 0
},
{
"Name": "b",
@@ -306,52 +306,11 @@
"PKColumns": [
0
],
- "Type": 1
- },
- {
- "Name": "e",
- "Columns": [
- {
- "Name": "eid",
- "Category": 1,
- "IsAuto": false,
- "Default": 0
- },
- {
- "Name": "id",
- "Category": 1,
- "IsAuto": false,
- "Default": 0
- }
- ],
- "Indexes": [
- {
- "Name": "PRIMARY",
- "Columns": [
- "eid",
- "id"
- ],
- "Cardinality": [
- 1,
- 1
- ],
- "DataColumns": [
- "eid",
- "id"
- ]
- }
- ],
- "PKColumns": [
- 0,
- 1,
- 0,
- 1
- ],
- "Type": 2
+ "Type": 0
},
{
"Name": "seq",
- "Type": 3
+ "Type": 1
},
{
"Name": "dual",
diff --git a/data/test/tabletserver/stream_cases.txt b/data/test/tabletserver/stream_cases.txt
index 3b7b3ed5f97..99801a4c552 100644
--- a/data/test/tabletserver/stream_cases.txt
+++ b/data/test/tabletserver/stream_cases.txt
@@ -17,10 +17,6 @@
"select * from a for update"
"select with lock not allowed for streaming"
-# select from dual"
-"select 1 from dual"
-"select from dual not allowed for streaming"
-
# union
"select * from a union select * from b"
{
diff --git a/examples/kubernetes/vttablet-pod-benchmarking-template.yaml b/examples/kubernetes/vttablet-pod-benchmarking-template.yaml
index 58008a8245f..f7d9e29f3a7 100644
--- a/examples/kubernetes/vttablet-pod-benchmarking-template.yaml
+++ b/examples/kubernetes/vttablet-pod-benchmarking-template.yaml
@@ -71,10 +71,7 @@ spec:
-queryserver-config-transaction-cap 300
-queryserver-config-schema-reload-time 1
-queryserver-config-pool-size 100
- -enable-rowcache
-enable_replication_reporter
- -rowcache-bin /usr/bin/memcached
- -rowcache-socket $VTDATAROOT/{{tablet_subdir}}/memcache.sock" vitess
env:
- name: GOMAXPROCS
value: "16"
diff --git a/examples/kubernetes/vttablet-pod-template.yaml b/examples/kubernetes/vttablet-pod-template.yaml
index aba3296f9c6..871d3eb05fc 100644
--- a/examples/kubernetes/vttablet-pod-template.yaml
+++ b/examples/kubernetes/vttablet-pod-template.yaml
@@ -75,11 +75,8 @@ spec:
-db-config-filtered-uname vt_filtered
-db-config-filtered-dbname vt_{{keyspace}}
-db-config-filtered-charset utf8
- -enable-rowcache
-enable_semi_sync
-enable_replication_reporter
- -rowcache-bin /usr/bin/memcached
- -rowcache-socket $VTDATAROOT/{{tablet_subdir}}/memcache.sock
-restore_from_backup {{backup_flags}}" vitess
- name: mysql
image: vitess/lite
diff --git a/examples/local/vttablet-up.sh b/examples/local/vttablet-up.sh
index cffa6716d1c..48bdbcae695 100755
--- a/examples/local/vttablet-up.sh
+++ b/examples/local/vttablet-up.sh
@@ -107,11 +107,8 @@ for uid_index in $uids; do
-init_shard $shard \
-target_tablet_type $tablet_type \
-health_check_interval 5s \
- -enable-rowcache \
-enable_semi_sync \
-enable_replication_reporter \
- -rowcache-bin $memcached_path \
- -rowcache-socket $VTDATAROOT/$tablet_dir/memcache.sock \
-backup_storage_implementation file \
-file_backup_storage_root $VTDATAROOT/backups \
-restore_from_backup \
diff --git a/go/cmd/vttablet/status.go b/go/cmd/vttablet/status.go
index 0cdbc1cb5d8..bbf147d8eef 100644
--- a/go/cmd/vttablet/status.go
+++ b/go/cmd/vttablet/status.go
@@ -68,7 +68,6 @@ var (
Health Check
Query Service Health Check
- Memcache
Current Stream Queries
|
diff --git a/go/cmd/vttablet/vttablet.go b/go/cmd/vttablet/vttablet.go
index b577188e190..e1594ceb294 100644
--- a/go/cmd/vttablet/vttablet.go
+++ b/go/cmd/vttablet/vttablet.go
@@ -23,15 +23,12 @@ import (
// import mysql to register mysql connection function
_ "github.com/youtube/vitess/go/mysql"
- // import memcache to register memcache connection function
- _ "github.com/youtube/vitess/go/memcache"
)
var (
enforceTableACLConfig = flag.Bool("enforce-tableacl-config", false, "if this flag is true, vttablet will fail to start if a valid tableacl config does not exist")
tableAclConfig = flag.String("table-acl-config", "", "path to table access checker config file")
tabletPath = flag.String("tablet-path", "", "tablet alias")
- overridesFile = flag.String("schema-override", "", "schema overrides file")
agent *tabletmanager.ActionAgent
)
@@ -123,7 +120,7 @@ func main() {
if servenv.GRPCPort != nil {
gRPCPort = int32(*servenv.GRPCPort)
}
- agent, err = tabletmanager.NewActionAgent(context.Background(), mysqld, qsc, tabletAlias, dbcfgs, mycnf, int32(*servenv.Port), gRPCPort, *overridesFile)
+ agent, err = tabletmanager.NewActionAgent(context.Background(), mysqld, qsc, tabletAlias, dbcfgs, mycnf, int32(*servenv.Port), gRPCPort)
if err != nil {
log.Error(err)
exit.Return(1)
diff --git a/go/vt/mysqlctl/mycnf.go b/go/vt/mysqlctl/mycnf.go
index 007e558f0b9..a9eb7e73620 100644
--- a/go/vt/mysqlctl/mycnf.go
+++ b/go/vt/mysqlctl/mycnf.go
@@ -68,7 +68,7 @@ type Mycnf struct {
RelayLogInfoPath string
// BinLogPath is the base path for binlogs
- // (used by vt software for binlog streaming and rowcache invalidation)
+ // (used by vt software for binlog streaming)
BinLogPath string
// MasterInfoFile is the master.info file location.
diff --git a/go/vt/schema/schema.go b/go/vt/schema/schema.go
index 3f6e08deee2..4e454bb5fc5 100644
--- a/go/vt/schema/schema.go
+++ b/go/vt/schema/schema.go
@@ -14,14 +14,19 @@ import (
querypb "github.com/youtube/vitess/go/vt/proto/query"
)
-// Cache types
+// Table types
const (
- CacheNone = 0
- CacheRW = 1
- CacheW = 2
- Sequence = 3
+ NoType = iota
+ Sequence
)
+// TypeNames allows fetching the type name for a table.
+// Count must match the number of table types.
+var TypeNames = []string{
+ "none",
+ "sequence",
+}
+
// TableColumn contains info about a table's column.
type TableColumn struct {
Name cistring.CIString
@@ -52,17 +57,6 @@ func NewTable(name string) *Table {
}
}
-// IsCached returns true if the table has a rowcache association.
-func (ta *Table) IsCached() bool {
- return ta.Type == CacheRW || ta.Type == CacheW
-}
-
-// IsReadCached returns true if the rowcache can be used for reads.
-// TODO(sougou): remove after deprecating schema overrides.
-func (ta *Table) IsReadCached() bool {
- return ta.Type == CacheRW
-}
-
// AddColumn adds a column to the Table.
func (ta *Table) AddColumn(name string, columnType querypb.Type, defval sqltypes.Value, extra string) {
index := len(ta.Columns)
diff --git a/go/vt/sqlparser/analyzer.go b/go/vt/sqlparser/analyzer.go
index 72b27ac9f5a..19efde84454 100644
--- a/go/vt/sqlparser/analyzer.go
+++ b/go/vt/sqlparser/analyzer.go
@@ -22,15 +22,6 @@ func GetTableName(node SimpleTableExpr) string {
return ""
}
-// GetColName returns the column name, only if
-// it's a simple expression. Otherwise, it returns "".
-func GetColName(node Expr) ColIdent {
- if n, ok := node.(*ColName); ok {
- return n.Name
- }
- return ColIdent{}
-}
-
// IsColName returns true if the ValExpr is a *ColName.
func IsColName(node ValExpr) bool {
_, ok := node.(*ColName)
@@ -56,16 +47,6 @@ func IsNull(node ValExpr) bool {
return false
}
-// HasINClause returns true if any of the conditions has an IN clause.
-func HasINClause(conditions []BoolExpr) bool {
- for _, node := range conditions {
- if c, ok := node.(*ComparisonExpr); ok && c.Operator == InStr {
- return true
- }
- }
- return false
-}
-
// IsSimpleTuple returns true if the ValExpr is a ValTuple that
// contains simple values or if it's a list arg.
func IsSimpleTuple(node ValExpr) bool {
diff --git a/go/vt/sqlparser/ast.go b/go/vt/sqlparser/ast.go
index 447c6323ffc..67dbf736a57 100644
--- a/go/vt/sqlparser/ast.go
+++ b/go/vt/sqlparser/ast.go
@@ -6,8 +6,6 @@ package sqlparser
import (
"errors"
- "fmt"
- "strconv"
"strings"
"github.com/youtube/vitess/go/cistring"
@@ -539,17 +537,19 @@ func (node Nextval) WalkSubtree(visit Visit) error {
}
// Columns represents an insert column list.
-// The syntax for Columns is a subset of SelectExprs.
-// So, it's castable to a SelectExprs and can be analyzed
-// as such.
-type Columns []SelectExpr
+type Columns []ColIdent
// Format formats the node.
func (node Columns) Format(buf *TrackedBuffer) {
if node == nil {
return
}
- buf.Myprintf("(%v)", SelectExprs(node))
+ prefix := "("
+ for _, n := range node {
+ buf.Myprintf("%s%v", prefix, n)
+ prefix = ", "
+ }
+ buf.WriteString(")")
}
// WalkSubtree walks the nodes of the subtree
@@ -1582,49 +1582,6 @@ func (node *Limit) WalkSubtree(visit Visit) error {
)
}
-// Limits returns the values of the LIMIT clause as interfaces.
-// The returned values can be nil for absent field, string for
-// bind variable names, or int64 for an actual number.
-// Otherwise, it's an error.
-func (node *Limit) Limits() (offset, rowcount interface{}, err error) {
- if node == nil {
- return nil, nil, nil
- }
- switch v := node.Offset.(type) {
- case NumVal:
- o, err := strconv.ParseInt(string(v), 0, 64)
- if err != nil {
- return nil, nil, err
- }
- if o < 0 {
- return nil, nil, fmt.Errorf("negative offset: %d", o)
- }
- offset = o
- case ValArg:
- offset = string(v)
- case nil:
- // pass
- default:
- return nil, nil, fmt.Errorf("unexpected node for offset: %+v", v)
- }
- switch v := node.Rowcount.(type) {
- case NumVal:
- rc, err := strconv.ParseInt(string(v), 0, 64)
- if err != nil {
- return nil, nil, err
- }
- if rc < 0 {
- return nil, nil, fmt.Errorf("negative limit: %d", rc)
- }
- rowcount = rc
- case ValArg:
- rowcount = string(v)
- default:
- return nil, nil, fmt.Errorf("unexpected node for rowcount: %+v", v)
- }
- return offset, rowcount, nil
-}
-
// Values represents a VALUES clause.
type Values []RowTuple
@@ -1680,7 +1637,7 @@ func (node UpdateExprs) WalkSubtree(visit Visit) error {
// UpdateExpr represents an update expression.
type UpdateExpr struct {
- Name *ColName
+ Name ColIdent
Expr ValExpr
}
diff --git a/go/vt/sqlparser/ast_test.go b/go/vt/sqlparser/ast_test.go
index ea98374d4d1..474565099b9 100644
--- a/go/vt/sqlparser/ast_test.go
+++ b/go/vt/sqlparser/ast_test.go
@@ -83,80 +83,6 @@ func TestWhere(t *testing.T) {
}
}
-func TestLimits(t *testing.T) {
- var l *Limit
- o, r, err := l.Limits()
- if o != nil || r != nil || err != nil {
- t.Errorf("got %v, %v, %v, want nils", o, r, err)
- }
-
- l = &Limit{Offset: NumVal([]byte("aa"))}
- _, _, err = l.Limits()
- wantErr := "strconv.ParseInt: parsing \"aa\": invalid syntax"
- if err == nil || err.Error() != wantErr {
- t.Errorf("got %v, want %s", err, wantErr)
- }
-
- l = &Limit{Offset: NumVal([]byte("2"))}
- _, _, err = l.Limits()
- wantErr = "unexpected node for rowcount: "
- if err == nil || err.Error() != wantErr {
- t.Errorf("got %v, want %s", err, wantErr)
- }
-
- l = &Limit{Offset: StrVal([]byte("2"))}
- _, _, err = l.Limits()
- wantErr = "unexpected node for offset: [50]"
- if err == nil || err.Error() != wantErr {
- t.Errorf("got %v, want %s", err, wantErr)
- }
-
- l = &Limit{Offset: NumVal([]byte("2")), Rowcount: NumVal([]byte("aa"))}
- _, _, err = l.Limits()
- wantErr = "strconv.ParseInt: parsing \"aa\": invalid syntax"
- if err == nil || err.Error() != wantErr {
- t.Errorf("got %v, want %s", err, wantErr)
- }
-
- l = &Limit{Offset: NumVal([]byte("2")), Rowcount: NumVal([]byte("3"))}
- o, r, err = l.Limits()
- if o.(int64) != 2 || r.(int64) != 3 || err != nil {
- t.Errorf("got %v %v %v, want 2, 3, nil", o, r, err)
- }
-
- l = &Limit{Offset: ValArg([]byte(":a")), Rowcount: NumVal([]byte("3"))}
- o, r, err = l.Limits()
- if o.(string) != ":a" || r.(int64) != 3 || err != nil {
- t.Errorf("got %v %v %v, want :a, 3, nil", o, r, err)
- }
-
- l = &Limit{Offset: nil, Rowcount: NumVal([]byte("3"))}
- o, r, err = l.Limits()
- if o != nil || r.(int64) != 3 || err != nil {
- t.Errorf("got %v %v %v, want nil, 3, nil", o, r, err)
- }
-
- l = &Limit{Offset: nil, Rowcount: ValArg([]byte(":a"))}
- o, r, err = l.Limits()
- if o != nil || r.(string) != ":a" || err != nil {
- t.Errorf("got %v %v %v, want nil, :a, nil", o, r, err)
- }
-
- l = &Limit{Offset: NumVal([]byte("-2")), Rowcount: NumVal([]byte("0"))}
- _, _, err = l.Limits()
- wantErr = "negative offset: -2"
- if err == nil || err.Error() != wantErr {
- t.Errorf("got %v, want %s", err, wantErr)
- }
-
- l = &Limit{Offset: NumVal([]byte("2")), Rowcount: NumVal([]byte("-2"))}
- _, _, err = l.Limits()
- wantErr = "negative limit: -2"
- if err == nil || err.Error() != wantErr {
- t.Errorf("got %v, want %s", err, wantErr)
- }
-}
-
func TestIsAggregate(t *testing.T) {
f := FuncExpr{Name: "avg"}
if !f.IsAggregate() {
diff --git a/go/vt/sqlparser/parse_test.go b/go/vt/sqlparser/parse_test.go
index 36bf84bf6e6..66ec25a044d 100644
--- a/go/vt/sqlparser/parse_test.go
+++ b/go/vt/sqlparser/parse_test.go
@@ -396,14 +396,14 @@ func TestValid(t *testing.T) {
}, {
input: "insert /* multi-value list */ into a values (1, 2), (3, 4)",
}, {
- input: "insert /* set */ into a set a = 1, a.b = 2",
- output: "insert /* set */ into a(a, a.b) values (1, 2)",
+ input: "insert /* set */ into a set a = 1, b = 2",
+ output: "insert /* set */ into a(a, b) values (1, 2)",
}, {
input: "insert /* value expression list */ into a values (a + 1, 2 * 3)",
}, {
input: "insert /* column list */ into a(a, b) values (1, 2)",
}, {
- input: "insert /* qualified column list */ into a(a, a.b) values (1, 2)",
+ input: "insert /* qualified column list */ into a(a, b) values (1, 2)",
}, {
input: "insert /* select */ into a select b, c from d",
}, {
@@ -412,8 +412,6 @@ func TestValid(t *testing.T) {
input: "update /* simple */ a set b = 3",
}, {
input: "update /* a.b */ a.b set b = 3",
- }, {
- input: "update /* b.c */ a set b.c = 3",
}, {
input: "update /* list */ a set b = 3, c = 4",
}, {
diff --git a/go/vt/sqlparser/sql.go b/go/vt/sqlparser/sql.go
index da58c5f5e4e..93cb8c11fa5 100644
--- a/go/vt/sqlparser/sql.go
+++ b/go/vt/sqlparser/sql.go
@@ -277,7 +277,7 @@ var yyExca = [...]int{
-1, 1,
1, -1,
-2, 0,
- -1, 67,
+ -1, 106,
45, 221,
89, 221,
-2, 220,
@@ -289,143 +289,149 @@ const yyPrivate = 57344
var yyTokenNames []string
var yyStates []string
-const yyLast = 760
+const yyLast = 813
var yyAct = [...]int{
- 142, 336, 62, 108, 285, 401, 114, 115, 215, 235,
- 294, 65, 276, 199, 247, 225, 326, 138, 104, 241,
- 149, 227, 198, 3, 35, 77, 37, 155, 69, 47,
- 38, 103, 40, 59, 41, 66, 354, 356, 41, 224,
- 153, 64, 226, 75, 43, 44, 45, 80, 98, 71,
- 50, 229, 74, 384, 383, 48, 49, 382, 70, 73,
- 46, 157, 59, 42, 364, 125, 83, 125, 63, 92,
- 219, 130, 91, 196, 96, 87, 180, 99, 183, 184,
- 185, 180, 107, 67, 58, 414, 59, 66, 137, 136,
- 66, 102, 145, 64, 355, 170, 64, 169, 168, 158,
- 277, 95, 159, 144, 97, 152, 154, 151, 90, 91,
- 93, 166, 170, 88, 370, 195, 197, 234, 164, 84,
- 156, 254, 146, 178, 186, 187, 181, 182, 183, 184,
- 185, 180, 160, 59, 252, 253, 251, 135, 277, 72,
- 324, 67, 168, 217, 179, 178, 186, 187, 181, 182,
- 183, 184, 185, 180, 214, 109, 170, 305, 140, 169,
- 168, 221, 59, 107, 141, 366, 165, 145, 107, 311,
- 312, 313, 239, 240, 170, 125, 125, 233, 144, 60,
- 222, 232, 250, 60, 210, 60, 200, 14, 94, 143,
- 201, 202, 203, 204, 72, 78, 206, 258, 89, 236,
- 238, 410, 238, 271, 238, 209, 107, 269, 270, 272,
- 169, 168, 271, 66, 66, 281, 273, 218, 371, 64,
- 283, 289, 368, 284, 290, 170, 125, 125, 28, 60,
- 304, 288, 59, 230, 280, 378, 109, 274, 107, 162,
- 238, 109, 327, 291, 249, 246, 309, 308, 255, 256,
- 257, 220, 259, 260, 261, 262, 263, 264, 265, 266,
- 267, 268, 314, 242, 244, 245, 315, 82, 243, 310,
- 238, 112, 181, 182, 183, 184, 185, 180, 327, 109,
- 89, 325, 292, 238, 321, 332, 238, 329, 323, 333,
- 101, 212, 331, 59, 59, 59, 59, 167, 330, 139,
- 161, 112, 112, 230, 381, 380, 357, 358, 125, 76,
- 205, 109, 346, 352, 207, 342, 345, 344, 359, 338,
- 360, 139, 218, 249, 361, 213, 316, 317, 318, 112,
- 86, 39, 167, 66, 292, 341, 162, 343, 387, 369,
- 296, 299, 300, 301, 297, 320, 298, 302, 349, 347,
- 379, 231, 112, 350, 348, 79, 89, 112, 112, 112,
- 334, 337, 248, 57, 230, 230, 230, 230, 296, 299,
- 300, 301, 297, 390, 298, 302, 388, 55, 367, 85,
- 392, 14, 394, 395, 391, 351, 147, 300, 301, 407,
- 54, 393, 100, 363, 307, 112, 400, 286, 365, 377,
- 66, 408, 405, 403, 404, 279, 64, 287, 402, 402,
- 402, 409, 216, 411, 412, 14, 15, 16, 17, 51,
- 52, 231, 415, 139, 376, 134, 416, 112, 417, 237,
- 29, 124, 133, 340, 61, 413, 385, 18, 399, 14,
- 386, 248, 28, 30, 389, 337, 31, 32, 33, 34,
- 1, 218, 306, 303, 362, 125, 163, 238, 67, 126,
- 127, 128, 148, 36, 129, 122, 123, 112, 223, 111,
- 150, 131, 179, 178, 186, 187, 181, 182, 183, 184,
- 185, 180, 231, 231, 231, 231, 373, 374, 68, 132,
- 116, 117, 105, 282, 211, 124, 118, 406, 119, 372,
- 19, 20, 22, 21, 23, 335, 375, 339, 322, 208,
- 275, 120, 121, 24, 25, 26, 328, 113, 278, 125,
- 171, 110, 67, 126, 127, 128, 353, 295, 129, 122,
- 123, 293, 228, 111, 106, 131, 81, 14, 53, 27,
- 179, 178, 186, 187, 181, 182, 183, 184, 185, 180,
- 56, 13, 124, 12, 116, 117, 105, 11, 10, 9,
- 118, 8, 119, 7, 6, 5, 112, 4, 112, 112,
- 124, 2, 396, 397, 398, 120, 125, 0, 0, 67,
- 126, 127, 128, 14, 0, 129, 122, 123, 0, 0,
- 111, 0, 131, 0, 125, 0, 0, 67, 126, 127,
- 128, 0, 0, 129, 122, 123, 0, 0, 111, 0,
- 131, 116, 117, 0, 0, 0, 0, 118, 0, 119,
- 0, 0, 125, 0, 0, 67, 126, 127, 128, 116,
- 117, 129, 120, 0, 0, 118, 125, 119, 131, 67,
- 126, 127, 128, 0, 0, 129, 0, 0, 0, 0,
- 120, 0, 131, 0, 0, 0, 0, 116, 117, 0,
- 0, 0, 0, 118, 0, 119, 0, 0, 0, 0,
- 0, 116, 117, 0, 0, 0, 0, 118, 120, 119,
- 0, 0, 0, 0, 0, 0, 0, 173, 176, 72,
- 0, 0, 120, 188, 189, 190, 191, 192, 193, 194,
- 177, 174, 175, 172, 179, 178, 186, 187, 181, 182,
- 183, 184, 185, 180, 319, 0, 0, 179, 178, 186,
- 187, 181, 182, 183, 184, 185, 180, 0, 0, 0,
- 0, 0, 179, 178, 186, 187, 181, 182, 183, 184,
- 185, 180, 179, 178, 186, 187, 181, 182, 183, 184,
- 185, 180, 186, 187, 181, 182, 183, 184, 185, 180,
+ 127, 192, 283, 233, 62, 112, 211, 223, 195, 324,
+ 291, 400, 334, 222, 274, 245, 221, 136, 239, 194,
+ 3, 145, 74, 225, 99, 67, 35, 100, 37, 40,
+ 70, 41, 38, 41, 220, 64, 94, 383, 69, 382,
+ 47, 71, 381, 68, 151, 46, 42, 50, 363, 128,
+ 352, 354, 14, 306, 122, 80, 58, 149, 104, 43,
+ 44, 45, 84, 176, 413, 122, 48, 49, 174, 182,
+ 183, 177, 178, 179, 180, 181, 176, 98, 153, 63,
+ 105, 275, 166, 322, 64, 85, 106, 64, 134, 139,
+ 164, 122, 87, 275, 106, 123, 124, 125, 214, 89,
+ 126, 81, 122, 160, 166, 60, 133, 129, 353, 162,
+ 91, 196, 93, 165, 164, 197, 198, 199, 200, 365,
+ 230, 65, 148, 150, 147, 302, 113, 114, 166, 142,
+ 161, 205, 115, 122, 116, 409, 236, 152, 60, 156,
+ 248, 106, 252, 215, 165, 164, 217, 117, 179, 180,
+ 181, 176, 210, 60, 206, 250, 251, 249, 65, 166,
+ 105, 90, 229, 231, 28, 105, 138, 191, 193, 244,
+ 75, 202, 253, 254, 255, 86, 257, 258, 259, 260,
+ 261, 262, 263, 264, 265, 266, 228, 218, 234, 309,
+ 310, 311, 256, 325, 236, 86, 213, 267, 268, 270,
+ 157, 271, 269, 105, 165, 164, 236, 226, 14, 64,
+ 281, 141, 122, 279, 137, 231, 137, 282, 247, 166,
+ 240, 242, 243, 237, 238, 241, 272, 278, 182, 183,
+ 177, 178, 179, 180, 181, 176, 158, 105, 307, 288,
+ 269, 236, 158, 236, 305, 289, 236, 122, 215, 289,
+ 60, 86, 314, 315, 316, 312, 330, 236, 370, 367,
+ 308, 208, 73, 377, 325, 313, 293, 296, 297, 298,
+ 294, 318, 295, 299, 216, 97, 319, 226, 122, 349,
+ 380, 297, 298, 327, 329, 331, 332, 335, 321, 328,
+ 109, 177, 178, 179, 180, 181, 176, 347, 340, 247,
+ 342, 345, 348, 339, 79, 341, 346, 358, 76, 357,
+ 350, 39, 359, 379, 344, 163, 360, 343, 406, 362,
+ 109, 109, 55, 369, 364, 83, 14, 336, 386, 201,
+ 407, 368, 366, 203, 323, 54, 82, 143, 96, 226,
+ 226, 226, 226, 57, 304, 209, 51, 52, 284, 109,
+ 277, 376, 163, 175, 174, 182, 183, 177, 178, 179,
+ 180, 181, 176, 384, 285, 212, 375, 385, 338, 227,
+ 109, 388, 335, 387, 137, 109, 109, 109, 215, 132,
+ 246, 390, 61, 389, 140, 392, 131, 412, 398, 14,
+ 28, 30, 1, 399, 303, 300, 401, 401, 401, 64,
+ 159, 144, 36, 404, 408, 219, 410, 411, 402, 403,
+ 414, 146, 66, 109, 415, 130, 416, 59, 293, 296,
+ 297, 298, 294, 280, 295, 299, 29, 72, 378, 207,
+ 405, 77, 371, 333, 391, 374, 393, 394, 337, 227,
+ 320, 204, 31, 32, 33, 34, 59, 109, 273, 118,
+ 111, 88, 326, 110, 276, 92, 167, 107, 95, 351,
+ 292, 246, 290, 103, 224, 102, 78, 59, 53, 135,
+ 27, 56, 13, 14, 15, 16, 17, 372, 373, 154,
+ 12, 11, 155, 10, 9, 8, 7, 109, 235, 6,
+ 121, 5, 4, 2, 0, 18, 0, 0, 0, 0,
+ 0, 227, 227, 227, 227, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 122, 59, 236, 106, 123, 124,
+ 125, 0, 0, 126, 119, 120, 0, 0, 108, 0,
+ 129, 175, 174, 182, 183, 177, 178, 179, 180, 181,
+ 176, 0, 59, 103, 0, 0, 0, 232, 103, 113,
+ 114, 101, 0, 0, 0, 115, 361, 116, 19, 20,
+ 22, 21, 23, 0, 0, 0, 0, 0, 0, 0,
+ 117, 24, 25, 26, 175, 174, 182, 183, 177, 178,
+ 179, 180, 181, 176, 0, 0, 103, 109, 0, 109,
+ 109, 0, 0, 395, 396, 397, 0, 0, 121, 232,
+ 0, 286, 0, 0, 287, 0, 0, 0, 0, 0,
+ 301, 0, 59, 0, 0, 0, 0, 0, 0, 0,
+ 103, 0, 122, 0, 317, 106, 123, 124, 125, 0,
+ 0, 126, 119, 120, 0, 0, 108, 0, 129, 0,
+ 0, 14, 175, 174, 182, 183, 177, 178, 179, 180,
+ 181, 176, 0, 0, 0, 0, 121, 113, 114, 101,
+ 0, 0, 0, 115, 0, 116, 0, 0, 0, 0,
+ 0, 0, 0, 121, 59, 59, 59, 59, 117, 0,
+ 122, 0, 0, 106, 123, 124, 125, 355, 356, 126,
+ 119, 120, 0, 0, 108, 0, 129, 122, 0, 0,
+ 106, 123, 124, 125, 0, 0, 126, 119, 120, 0,
+ 0, 108, 0, 129, 65, 113, 114, 0, 0, 0,
+ 0, 115, 0, 116, 0, 122, 0, 0, 106, 123,
+ 124, 125, 113, 114, 126, 0, 117, 0, 115, 0,
+ 116, 129, 175, 174, 182, 183, 177, 178, 179, 180,
+ 181, 176, 0, 117, 0, 0, 0, 0, 0, 0,
+ 113, 114, 0, 0, 0, 0, 115, 0, 116, 0,
+ 0, 0, 0, 0, 0, 0, 169, 172, 0, 0,
+ 0, 117, 184, 185, 186, 187, 188, 189, 190, 173,
+ 170, 171, 168, 175, 174, 182, 183, 177, 178, 179,
+ 180, 181, 176, 175, 174, 182, 183, 177, 178, 179,
+ 180, 181, 176,
}
var yyPact = [...]int{
- 409, -1000, -1000, 437, -1000, -1000, -1000, -1000, -1000, -1000,
- -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -72,
- -66, -33, -52, -36, -1000, -1000, -1000, 433, 401, 358,
- -1000, -62, 135, 424, 93, -73, -39, 91, -1000, -37,
- 91, -1000, 135, -76, 147, -76, 135, -1000, -1000, -1000,
- -1000, -1000, -1000, 232, 91, -1000, 66, 355, 302, -14,
- -1000, 135, 152, -1000, 43, -1000, -17, -1000, 135, 51,
- 140, -1000, -1000, 135, -1000, -51, 135, 371, 246, 91,
- -1000, 474, -1000, 415, -1000, 135, 93, 135, 412, 93,
- 591, 93, -1000, 365, -83, -1000, 13, -1000, 135, -1000,
- -1000, 135, -1000, 290, -1000, -1000, 146, 22, 40, 628,
- -1000, 549, 531, -1000, -1000, -1000, 591, 591, 591, 591,
- 182, -1000, -1000, -1000, 182, -1000, -1000, -1000, -1000, -1000,
- -1000, 591, 135, -1000, -1000, 263, 310, -1000, 398, 549,
- -1000, 666, 20, 577, -1000, -19, -1000, -1000, 207, 91,
- -1000, -60, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000,
- -1000, 131, 474, -1000, -1000, 91, 35, 410, 549, 549,
- 209, 591, 130, 61, 591, 591, 591, 209, 591, 591,
- 591, 591, 591, 591, 591, 591, 591, 591, -1000, -1000,
- -1000, -1000, -1000, -1000, -1000, 23, 628, 153, 223, 157,
- 628, -1000, -1000, -1000, 641, 474, -1000, 433, 38, 666,
- -1000, 375, 93, 93, 398, 381, 392, 40, 666, 91,
- 135, -1000, -1000, 135, -1000, 288, 334, -1000, -1000, 137,
- 374, 181, -1000, -1000, -1000, -1000, 193, 474, -1000, 23,
- 84, -1000, -1000, 115, -1000, -1000, 666, -1000, 577, -1000,
- -1000, 130, 591, 591, 591, 666, 666, 656, -1000, 674,
- 46, -1000, -4, -4, -9, -9, -9, 192, 192, -1000,
- -1000, 591, -1000, -1000, 193, 76, -1000, 549, 198, 182,
- 437, 234, 239, -1000, 381, -1000, 591, 591, -1000, -1000,
- -1000, 421, 131, 131, 131, 131, -1000, 282, 278, -1000,
- 315, 314, 351, -6, -1000, 135, 135, -1000, 236, -1000,
- 193, -1000, -1000, -1000, 157, -1000, 666, 666, 396, 591,
- 666, -1000, -26, -1000, 591, 102, -1000, 353, 176, -1000,
- -1000, -1000, 93, -1000, 68, 172, -1000, 464, -1000, 411,
- 384, 334, 191, 306, -1000, -1000, -1000, -1000, 271, -1000,
- 270, -1000, -1000, -1000, -40, -43, -44, -1000, -1000, -1000,
- -1000, -1000, 591, 666, -1000, 666, 591, 312, 182, -1000,
- 591, 591, -1000, -1000, -1000, 398, 549, 591, 549, 549,
- -1000, -1000, 182, 182, 182, 666, 666, 430, -1000, 666,
- -1000, 381, 40, 166, 40, 40, 91, 91, 91, 93,
- 372, 155, -1000, 155, 155, 152, -1000, 427, 10, -1000,
- 91, -1000, -1000, -1000, 91, -1000, 91, -1000,
+ 467, -1000, -1000, 385, -1000, -1000, -1000, -1000, -1000, -1000,
+ -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -70,
+ -69, -50, -37, -51, -1000, -1000, -1000, 383, 328, 303,
+ -1000, -67, 90, 372, 73, -76, -54, 73, -1000, -66,
+ 73, -1000, 90, -79, 122, -79, 90, -1000, -1000, -1000,
+ -1000, -1000, -1000, 269, 73, -1000, 48, 312, 297, -27,
+ -1000, 90, 129, -1000, 27, -1000, 90, 40, 113, -1000,
+ 90, -1000, -63, 90, 317, 231, 73, -1000, 577, -1000,
+ 369, -1000, 90, 73, 90, 363, 73, 680, -1000, 316,
+ -82, -1000, 30, -1000, 90, -1000, -1000, 90, -1000, 190,
+ -1000, -1000, 110, 20, 87, 717, -1000, -1000, 652, 635,
+ -1000, -1000, -1000, 680, 680, 680, 680, 167, -1000, -1000,
+ -1000, 167, -1000, -1000, -1000, -1000, -1000, -1000, -1000, 680,
+ 90, -1000, -1000, 233, 205, -1000, 351, 652, -1000, 727,
+ 9, 46, -1000, -1000, 230, 73, -1000, -65, -1000, -1000,
+ -1000, -1000, -1000, -1000, -1000, -1000, -1000, 57, 577, -1000,
+ -1000, 73, 38, 469, 652, 652, 166, 680, 88, 82,
+ 680, 680, 680, 166, 680, 680, 680, 680, 680, 680,
+ 680, 680, 680, 680, -1000, -1000, -1000, -1000, -1000, -1000,
+ -1000, 10, 717, 147, 159, 194, 717, -1000, -1000, -1000,
+ 666, 577, -1000, 383, 31, 727, -1000, 320, 73, 73,
+ 351, 332, 349, 87, 93, 727, 90, -1000, -1000, 90,
+ -1000, 203, 232, -1000, -1000, 105, 324, 202, -1000, -1000,
+ -1000, -1000, -36, -1000, 196, 577, -1000, 10, 32, -1000,
+ -1000, 135, -1000, -1000, 727, -1000, 46, -1000, -1000, 88,
+ 680, 680, 680, 727, 727, 566, -1000, 150, -9, -1000,
+ 66, 66, -22, -22, -22, 211, 211, -1000, -1000, 680,
+ -1000, -1000, 196, 19, -1000, 652, 220, 167, 385, 149,
+ 210, -1000, 332, -1000, 680, 680, -1000, -1000, 356, 57,
+ 57, 57, 57, -1000, 283, 280, -1000, 267, 263, 245,
+ 8, -1000, 90, 90, -1000, 199, 73, -1000, 196, -1000,
+ -1000, -1000, 194, -1000, 727, 727, 498, 680, 727, -1000,
+ -42, -1000, 680, 56, -1000, 307, 213, -1000, -1000, -1000,
+ 73, -1000, 277, 212, -1000, 455, -1000, 353, 336, 232,
+ 219, 384, -1000, -1000, -1000, -1000, 279, -1000, 246, -1000,
+ -1000, -1000, -55, -58, -60, -1000, -1000, -1000, -1000, -1000,
+ -1000, 680, 727, -1000, 727, 680, 302, 167, -1000, 680,
+ 680, -1000, -1000, -1000, 351, 652, 680, 652, 652, -1000,
+ -1000, 167, 167, 167, 727, 727, 380, -1000, 727, -1000,
+ 332, 87, 156, 87, 87, 73, 73, 73, 73, 301,
+ 89, -1000, 89, 89, 129, -1000, 379, -11, -1000, 73,
+ -1000, -1000, -1000, 73, -1000, 73, -1000,
}
var yyPgo = [...]int{
- 0, 571, 22, 567, 565, 564, 563, 561, 559, 558,
- 557, 553, 551, 430, 550, 539, 538, 536, 31, 18,
- 534, 15, 42, 21, 532, 531, 10, 527, 51, 526,
- 5, 17, 3, 521, 520, 518, 517, 73, 19, 14,
- 13, 516, 7, 71, 6, 512, 510, 12, 509, 508,
- 507, 506, 8, 505, 1, 499, 4, 497, 494, 493,
- 16, 2, 68, 489, 331, 309, 488, 470, 468, 463,
- 462, 11, 456, 0, 453, 452, 29, 450, 443, 189,
- 9,
+ 0, 493, 19, 492, 491, 489, 486, 485, 484, 483,
+ 481, 480, 472, 426, 471, 470, 468, 466, 24, 27,
+ 465, 16, 13, 7, 464, 462, 10, 460, 23, 459,
+ 11, 17, 58, 457, 456, 454, 453, 1, 18, 15,
+ 8, 452, 5, 49, 450, 449, 448, 14, 441, 440,
+ 438, 435, 6, 433, 12, 432, 2, 430, 429, 423,
+ 9, 4, 79, 415, 311, 262, 412, 411, 405, 402,
+ 401, 0, 400, 384, 395, 394, 40, 392, 391, 211,
+ 3,
}
var yyR1 = [...]int{
@@ -487,42 +493,42 @@ var yyChk = [...]int{
-78, -13, -13, -13, -13, 96, -69, 98, 102, -64,
98, 100, 96, 96, 97, 98, 96, -76, -76, -76,
-2, 18, 19, -16, 32, 19, -14, -64, -28, -73,
- 48, 10, -61, -62, -44, -71, -73, 48, -66, 101,
- 97, -71, 48, 96, -71, -73, -65, 101, 48, -65,
- -73, -17, 35, -71, 53, 24, 28, 89, -28, 46,
- 65, 89, -73, 59, 48, -76, -73, -76, 99, -73,
- 21, 44, -71, -18, -19, 82, -20, -73, -32, -37,
- -33, 59, -79, -36, -44, -42, 80, 81, 86, 88,
- 101, -45, 55, 56, 21, 45, 49, 50, 51, 54,
- -43, 61, -63, 17, 10, -28, -61, -73, -31, 11,
- -62, -37, -73, -79, -71, -73, -76, 21, -70, 103,
- -67, 94, 92, 27, 93, 14, 107, 48, -73, -73,
- -76, 10, 46, -72, -71, 20, 89, -79, 58, 57,
- 72, -34, 75, 59, 73, 74, 60, 72, 77, 76,
- 85, 80, 81, 82, 83, 84, 78, 79, 65, 66,
- 67, 68, 69, 70, 71, -32, -37, -32, -2, -40,
- -37, -37, -37, -37, -37, -79, -43, -79, -48, -37,
- -28, -58, 28, -79, -31, -52, 14, -32, -37, 89,
- 44, -71, -76, -68, 99, -21, -22, -23, -24, -28,
- -43, -79, -19, -71, 82, -80, -18, 19, 47, -32,
- -32, -38, 54, 59, 55, 56, -37, -39, -79, -43,
- 52, 75, 73, 74, 60, -37, -37, -37, -38, -37,
- -37, -37, -37, -37, -37, -37, -37, -37, -37, -80,
- -80, 46, -80, -71, -18, -46, -47, 62, -35, 30,
- -2, -61, -59, -44, -52, -56, 16, 15, -71, -73,
- -73, -31, 46, -25, -26, -27, 34, 38, 40, 35,
- 36, 37, 41, -74, -73, 20, -75, 20, -21, -80,
- -18, 54, 55, 56, -40, -39, -37, -37, -37, 58,
- -37, -80, -49, -47, 64, -32, -60, 44, -41, -42,
- -60, -80, 46, -56, -37, -53, -54, -37, -76, -50,
- 12, -22, -23, -22, -23, 34, 34, 34, 39, 34,
- 39, 34, -26, -29, 42, 100, 43, -73, -73, -80,
- -80, -80, 58, -37, 90, -37, 63, 25, 46, -44,
- 46, 46, -55, 22, 23, -51, 13, 15, 44, 44,
- 34, 34, 97, 97, 97, -37, -37, 26, -42, -37,
- -54, -52, -32, -40, -32, -32, -79, -79, -79, 8,
- -56, -30, -71, -30, -30, -61, -57, 17, 29, -80,
- 46, -80, -80, 8, 75, -71, -71, -71,
+ 48, 10, -61, -62, -71, 48, -66, 101, 97, -71,
+ 96, -71, -73, -65, 101, 48, -65, -73, -17, 35,
+ -71, 53, 24, 28, 89, -28, 46, 65, -73, 59,
+ 48, -76, -73, -76, 99, -73, 21, 44, -71, -18,
+ -19, 82, -20, -73, -32, -37, 48, -33, 59, -79,
+ -36, -44, -42, 80, 81, 86, 88, 101, -45, 55,
+ 56, 21, 45, 49, 50, 51, 54, -71, -43, 61,
+ -63, 17, 10, -28, -61, -73, -31, 11, -62, -37,
+ -73, -79, -76, 21, -70, 103, -67, 94, 92, 27,
+ 93, 14, 107, 48, -73, -73, -76, 10, 46, -72,
+ -71, 20, 89, -79, 58, 57, 72, -34, 75, 59,
+ 73, 74, 60, 72, 77, 76, 85, 80, 81, 82,
+ 83, 84, 78, 79, 65, 66, 67, 68, 69, 70,
+ 71, -32, -37, -32, -2, -40, -37, -37, -37, -37,
+ -37, -79, -43, -79, -48, -37, -28, -58, 28, -79,
+ -31, -52, 14, -32, 89, -37, 44, -71, -76, -68,
+ 99, -21, -22, -23, -24, -28, -43, -79, -19, -71,
+ 82, -71, -73, -80, -18, 19, 47, -32, -32, -38,
+ 54, 59, 55, 56, -37, -39, -79, -43, 52, 75,
+ 73, 74, 60, -37, -37, -37, -38, -37, -37, -37,
+ -37, -37, -37, -37, -37, -37, -37, -80, -80, 46,
+ -80, -71, -18, -46, -47, 62, -35, 30, -2, -61,
+ -59, -71, -52, -56, 16, 15, -73, -73, -31, 46,
+ -25, -26, -27, 34, 38, 40, 35, 36, 37, 41,
+ -74, -73, 20, -75, 20, -21, 89, -80, -18, 54,
+ 55, 56, -40, -39, -37, -37, -37, 58, -37, -80,
+ -49, -47, 64, -32, -60, 44, -41, -42, -60, -80,
+ 46, -56, -37, -53, -54, -37, -76, -50, 12, -22,
+ -23, -22, -23, 34, 34, 34, 39, 34, 39, 34,
+ -26, -29, 42, 100, 43, -73, -73, -80, -71, -80,
+ -80, 58, -37, 90, -37, 63, 25, 46, -71, 46,
+ 46, -55, 22, 23, -51, 13, 15, 44, 44, 34,
+ 34, 97, 97, 97, -37, -37, 26, -42, -37, -54,
+ -52, -32, -40, -32, -32, -79, -79, -79, 8, -56,
+ -30, -71, -30, -30, -61, -57, 17, 29, -80, 46,
+ -80, -80, 8, 75, -71, -71, -71,
}
var yyDef = [...]int{
@@ -532,42 +538,42 @@ var yyDef = [...]int{
37, 206, 0, 0, 0, 204, 0, 0, 217, 0,
0, 207, 0, 202, 0, 202, 0, 32, 33, 34,
15, 40, 41, 44, 0, 43, 36, 0, 0, 82,
- 221, 0, 20, 197, 0, 160, 0, -2, 0, 0,
- 0, 224, 220, 0, 224, 0, 0, 0, 0, 0,
- 31, 0, 45, 0, 38, 0, 0, 0, 90, 0,
- 0, 0, 224, 0, 218, 23, 0, 26, 0, 28,
- 203, 0, 224, 0, 46, 48, 53, 0, 51, 52,
- 92, 0, 0, 130, 131, 132, 0, 0, 0, 0,
- 0, 151, 98, 99, 0, 222, 163, 164, 165, 166,
- 196, 153, 0, 200, 201, 185, 90, 83, 171, 0,
- 198, 199, 0, 0, 161, 0, 21, 205, 0, 0,
- 224, 214, 208, 209, 210, 211, 212, 213, 27, 29,
- 30, 0, 0, 49, 54, 0, 0, 0, 0, 0,
+ 221, 0, 20, 197, 0, 220, 0, 0, 0, 224,
+ 0, 224, 0, 0, 0, 0, 0, 31, 0, 45,
+ 0, 38, 0, 0, 0, 90, 0, 0, 224, 0,
+ 218, 23, 0, 26, 0, 28, 203, 0, 224, 0,
+ 46, 48, 53, 0, 51, 52, -2, 92, 0, 0,
+ 130, 131, 132, 0, 0, 0, 0, 0, 151, 98,
+ 99, 0, 222, 163, 164, 165, 166, 160, 196, 153,
+ 0, 200, 201, 185, 90, 83, 171, 0, 198, 199,
+ 0, 0, 21, 205, 0, 0, 224, 214, 208, 209,
+ 210, 211, 212, 213, 27, 29, 30, 0, 0, 49,
+ 54, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0, 117, 118,
- 119, 120, 121, 122, 123, 95, 0, 0, 0, 0,
- 128, 143, 144, 145, 0, 0, 110, 0, 0, 154,
- 14, 0, 0, 0, 171, 179, 0, 91, 128, 0,
- 0, 219, 24, 0, 215, 90, 56, 58, 59, 69,
- 67, 0, 47, 55, 50, 147, 0, 0, 223, 93,
- 94, 97, 111, 0, 113, 115, 100, 101, 0, 125,
- 126, 0, 0, 0, 0, 103, 105, 0, 109, 133,
- 134, 135, 136, 137, 138, 139, 140, 141, 142, 96,
- 127, 0, 195, 146, 0, 158, 155, 0, 189, 0,
- 192, 189, 0, 187, 179, 19, 0, 0, 162, 224,
- 25, 167, 0, 0, 0, 0, 72, 0, 0, 75,
- 0, 0, 0, 84, 70, 0, 0, 68, 0, 148,
- 0, 112, 114, 116, 0, 102, 104, 106, 0, 0,
- 129, 150, 0, 156, 0, 0, 16, 0, 191, 193,
- 17, 186, 0, 18, 180, 172, 173, 176, 22, 169,
- 0, 57, 63, 0, 66, 73, 74, 76, 0, 78,
- 0, 80, 81, 60, 0, 0, 0, 71, 61, 62,
- 149, 124, 0, 107, 152, 159, 0, 0, 0, 188,
- 0, 0, 175, 177, 178, 171, 0, 0, 0, 0,
- 77, 79, 0, 0, 0, 108, 157, 0, 194, 181,
- 174, 179, 170, 168, 64, 65, 0, 0, 0, 0,
- 182, 0, 88, 0, 0, 190, 13, 0, 0, 85,
- 0, 86, 87, 183, 0, 89, 0, 184,
+ 0, 0, 0, 0, 117, 118, 119, 120, 121, 122,
+ 123, 95, 0, 0, 0, 0, 128, 143, 144, 145,
+ 0, 0, 110, 0, 0, 154, 14, 0, 0, 0,
+ 171, 179, 0, 91, 0, 128, 0, 219, 24, 0,
+ 215, 90, 56, 58, 59, 69, 67, 0, 47, 55,
+ 50, 161, 0, 147, 0, 0, 223, 93, 94, 97,
+ 111, 0, 113, 115, 100, 101, 0, 125, 126, 0,
+ 0, 0, 0, 103, 105, 0, 109, 133, 134, 135,
+ 136, 137, 138, 139, 140, 141, 142, 96, 127, 0,
+ 195, 146, 0, 158, 155, 0, 189, 0, 192, 189,
+ 0, 187, 179, 19, 0, 0, 224, 25, 167, 0,
+ 0, 0, 0, 72, 0, 0, 75, 0, 0, 0,
+ 84, 70, 0, 0, 68, 0, 0, 148, 0, 112,
+ 114, 116, 0, 102, 104, 106, 0, 0, 129, 150,
+ 0, 156, 0, 0, 16, 0, 191, 193, 17, 186,
+ 0, 18, 180, 172, 173, 176, 22, 169, 0, 57,
+ 63, 0, 66, 73, 74, 76, 0, 78, 0, 80,
+ 81, 60, 0, 0, 0, 71, 61, 62, 162, 149,
+ 124, 0, 107, 152, 159, 0, 0, 0, 188, 0,
+ 0, 175, 177, 178, 171, 0, 0, 0, 0, 77,
+ 79, 0, 0, 0, 108, 157, 0, 194, 181, 174,
+ 179, 170, 168, 64, 65, 0, 0, 0, 0, 182,
+ 0, 88, 0, 0, 190, 13, 0, 0, 85, 0,
+ 86, 87, 183, 0, 89, 0, 184,
}
var yyTok1 = [...]int{
@@ -984,9 +990,9 @@ yydefault:
{
cols := make(Columns, 0, len(yyDollar[7].updateExprs))
vals := make(ValTuple, 0, len(yyDollar[7].updateExprs))
- for _, col := range yyDollar[7].updateExprs {
- cols = append(cols, &NonStarExpr{Expr: col.Name})
- vals = append(vals, col.Expr)
+ for _, updateList := range yyDollar[7].updateExprs {
+ cols = append(cols, updateList.Name)
+ vals = append(vals, updateList.Expr)
}
yyVAL.statement = &Insert{Comments: Comments(yyDollar[2].bytes2), Ignore: yyDollar[3].str, Table: yyDollar[5].tableName, Columns: cols, Rows: Values{vals}, OnDup: OnDup(yyDollar[8].updateExprs)}
}
@@ -2023,13 +2029,13 @@ yydefault:
yyDollar = yyS[yypt-1 : yypt+1]
//line sql.y:1021
{
- yyVAL.columns = Columns{&NonStarExpr{Expr: yyDollar[1].colName}}
+ yyVAL.columns = Columns{yyDollar[1].colIdent}
}
case 188:
yyDollar = yyS[yypt-3 : yypt+1]
//line sql.y:1025
{
- yyVAL.columns = append(yyVAL.columns, &NonStarExpr{Expr: yyDollar[3].colName})
+ yyVAL.columns = append(yyVAL.columns, yyDollar[3].colIdent)
}
case 189:
yyDollar = yyS[yypt-0 : yypt+1]
@@ -2095,7 +2101,7 @@ yydefault:
yyDollar = yyS[yypt-3 : yypt+1]
//line sql.y:1080
{
- yyVAL.updateExpr = &UpdateExpr{Name: yyDollar[1].colName, Expr: yyDollar[3].valExpr}
+ yyVAL.updateExpr = &UpdateExpr{Name: yyDollar[1].colIdent, Expr: yyDollar[3].valExpr}
}
case 202:
yyDollar = yyS[yypt-0 : yypt+1]
diff --git a/go/vt/sqlparser/sql.y b/go/vt/sqlparser/sql.y
index fca4ff03071..3fb30dbb113 100644
--- a/go/vt/sqlparser/sql.y
+++ b/go/vt/sqlparser/sql.y
@@ -214,9 +214,9 @@ insert_statement:
{
cols := make(Columns, 0, len($7))
vals := make(ValTuple, 0, len($7))
- for _, col := range $7 {
- cols = append(cols, &NonStarExpr{Expr: col.Name})
- vals = append(vals, col.Expr)
+ for _, updateList := range $7 {
+ cols = append(cols, updateList.Name)
+ vals = append(vals, updateList.Expr)
}
$$ = &Insert{Comments: Comments($2), Ignore: $3, Table: $5, Columns: cols, Rows: Values{vals}, OnDup: OnDup($8)}
}
@@ -1017,13 +1017,13 @@ column_list_opt:
}
column_list:
- column_name
+ sql_id
{
- $$ = Columns{&NonStarExpr{Expr: $1}}
+ $$ = Columns{$1}
}
-| column_list ',' column_name
+| column_list ',' sql_id
{
- $$ = append($$, &NonStarExpr{Expr: $3})
+ $$ = append($$, $3)
}
on_dup_opt:
@@ -1076,7 +1076,7 @@ update_list:
}
update_expression:
- column_name '=' value_expression
+ sql_id '=' value_expression
{
$$ = &UpdateExpr{Name: $1, Expr: $3}
}
diff --git a/go/vt/tabletmanager/action_agent.go b/go/vt/tabletmanager/action_agent.go
index 3b779da082e..d5a99c63bfc 100644
--- a/go/vt/tabletmanager/action_agent.go
+++ b/go/vt/tabletmanager/action_agent.go
@@ -23,10 +23,8 @@ package tabletmanager
import (
"encoding/hex"
- "encoding/json"
"flag"
"fmt"
- "io/ioutil"
"net"
"os"
"path"
@@ -82,7 +80,6 @@ type ActionAgent struct {
TabletAlias *topodatapb.TabletAlias
MysqlDaemon mysqlctl.MysqlDaemon
DBConfigs dbconfigs.DBConfigs
- SchemaOverrides []tabletserver.SchemaOverride
BinlogPlayerMap *BinlogPlayerMap
// exportStats is set only for production tablet.
@@ -174,25 +171,6 @@ type ActionAgent struct {
_slaveStopped *bool
}
-func loadSchemaOverrides(overridesFile string) []tabletserver.SchemaOverride {
- var schemaOverrides []tabletserver.SchemaOverride
- if overridesFile == "" {
- return schemaOverrides
- }
- data, err := ioutil.ReadFile(overridesFile)
- if err != nil {
- log.Warningf("can't read overridesFile %v: %v", overridesFile, err)
- return schemaOverrides
- }
- if err = json.Unmarshal(data, &schemaOverrides); err != nil {
- log.Warningf("can't parse overridesFile %v: %v", overridesFile, err)
- return schemaOverrides
- }
- data, _ = json.MarshalIndent(schemaOverrides, "", " ")
- log.Infof("schemaOverrides: %s\n", data)
- return schemaOverrides
-}
-
// NewActionAgent creates a new ActionAgent and registers all the
// associated services.
//
@@ -206,10 +184,7 @@ func NewActionAgent(
dbcfgs dbconfigs.DBConfigs,
mycnf *mysqlctl.Mycnf,
port, gRPCPort int32,
- overridesFile string,
) (agent *ActionAgent, err error) {
- schemaOverrides := loadSchemaOverrides(overridesFile)
-
topoServer := topo.GetServer()
agent = &ActionAgent{
@@ -220,7 +195,6 @@ func NewActionAgent(
TabletAlias: tabletAlias,
MysqlDaemon: mysqld,
DBConfigs: dbcfgs,
- SchemaOverrides: schemaOverrides,
History: history.New(historyLength),
_healthy: fmt.Errorf("healthcheck not run yet"),
}
@@ -300,7 +274,6 @@ func NewTestActionAgent(batchCtx context.Context, ts topo.Server, tabletAlias *t
TabletAlias: tabletAlias,
MysqlDaemon: mysqlDaemon,
DBConfigs: dbconfigs.DBConfigs{},
- SchemaOverrides: nil,
BinlogPlayerMap: nil,
History: history.New(historyLength),
_healthy: fmt.Errorf("healthcheck not run yet"),
@@ -327,7 +300,6 @@ func NewComboActionAgent(batchCtx context.Context, ts topo.Server, tabletAlias *
TabletAlias: tabletAlias,
MysqlDaemon: mysqlDaemon,
DBConfigs: dbcfgs,
- SchemaOverrides: nil,
BinlogPlayerMap: nil,
skipMysqlPortCheck: true,
History: history.New(historyLength),
@@ -593,7 +565,7 @@ func (agent *ActionAgent) Start(ctx context.Context, mysqlPort, vtPort, gRPCPort
Keyspace: tablet.Keyspace,
Shard: tablet.Shard,
TabletType: tablet.Type,
- }, agent.DBConfigs, agent.SchemaOverrides, agent.MysqlDaemon); err != nil {
+ }, agent.DBConfigs, agent.MysqlDaemon); err != nil {
return fmt.Errorf("failed to InitDBConfig: %v", err)
}
diff --git a/go/vt/tabletmanager/init_tablet_test.go b/go/vt/tabletmanager/init_tablet_test.go
index c1e6b1a8e62..515755ceb3a 100644
--- a/go/vt/tabletmanager/init_tablet_test.go
+++ b/go/vt/tabletmanager/init_tablet_test.go
@@ -40,7 +40,6 @@ func TestInitTablet(t *testing.T) {
TabletAlias: tabletAlias,
MysqlDaemon: mysqlDaemon,
DBConfigs: dbconfigs.DBConfigs{},
- SchemaOverrides: nil,
BinlogPlayerMap: nil,
batchCtx: ctx,
History: history.New(historyLength),
diff --git a/go/vt/tabletserver/cache_pool.go b/go/vt/tabletserver/cache_pool.go
deleted file mode 100644
index 9546a94fda5..00000000000
--- a/go/vt/tabletserver/cache_pool.go
+++ /dev/null
@@ -1,346 +0,0 @@
-// Copyright 2012, Google Inc. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package tabletserver
-
-import (
- "io/ioutil"
- "net/http"
- "os"
- "os/exec"
- "path"
- "sync"
- "time"
-
- log "github.com/golang/glog"
- "github.com/youtube/vitess/go/acl"
- "github.com/youtube/vitess/go/cacheservice"
- "github.com/youtube/vitess/go/pools"
- "github.com/youtube/vitess/go/stats"
- "github.com/youtube/vitess/go/sync2"
- vtrpcpb "github.com/youtube/vitess/go/vt/proto/vtrpc"
- "golang.org/x/net/context"
-)
-
-// CachePool re-exposes ResourcePool as a pool of Memcache connection objects.
-type CachePool struct {
- name string
- pool *pools.ResourcePool
- maxPrefix sync2.AtomicInt64
- cmd *exec.Cmd
- rowCacheConfig RowCacheConfig
- capacity int
- socket string
- idleTimeout time.Duration
- memcacheStats *MemcacheStats
- queryServiceStats *QueryServiceStats
- mu sync.Mutex
- statsURL string
-}
-
-// NewCachePool creates a new pool for rowcache connections.
-func NewCachePool(
- name string,
- rowCacheConfig RowCacheConfig,
- idleTimeout time.Duration,
- statsURL string,
- enablePublishStats bool,
- queryServiceStats *QueryServiceStats) *CachePool {
- cp := &CachePool{
- name: name,
- idleTimeout: idleTimeout,
- statsURL: statsURL,
- queryServiceStats: queryServiceStats,
- }
- if name != "" && enablePublishStats {
- cp.memcacheStats = NewMemcacheStats(
- rowCacheConfig.StatsPrefix+name, 10*time.Second, enableMain,
- queryServiceStats,
- func(key string) string {
- conn := cp.Get(context.Background())
- // This is not the same as defer cachePool.Put(conn)
- defer func() { cp.Put(conn) }()
- stats, err := conn.Stats(key)
- if err != nil {
- conn.Close()
- conn = nil
- log.Errorf("Cannot export memcache %v stats: %v", key, err)
- queryServiceStats.InternalErrors.Add("MemcacheStats", 1)
- return ""
- }
- return string(stats)
- })
- stats.Publish(name+"ConnPoolCapacity", stats.IntFunc(cp.Capacity))
- stats.Publish(name+"ConnPoolAvailable", stats.IntFunc(cp.Available))
- stats.Publish(name+"ConnPoolMaxCap", stats.IntFunc(cp.MaxCap))
- stats.Publish(name+"ConnPoolWaitCount", stats.IntFunc(cp.WaitCount))
- stats.Publish(name+"ConnPoolWaitTime", stats.DurationFunc(cp.WaitTime))
- stats.Publish(name+"ConnPoolIdleTimeout", stats.DurationFunc(cp.IdleTimeout))
- }
- http.Handle(statsURL, cp)
-
- if rowCacheConfig.Binary == "" {
- return cp
- }
- cp.rowCacheConfig = rowCacheConfig
-
- // Start with memcached defaults
- cp.capacity = 1024 - 50
- if rowCacheConfig.Connections > 0 {
- if rowCacheConfig.Connections <= 50 {
- log.Fatalf("insufficient capacity: %d", rowCacheConfig.Connections)
- }
- cp.capacity = rowCacheConfig.Connections - 50
- }
- return cp
-}
-
-// Open opens the pool. It launches memcache and waits till it's up.
-func (cp *CachePool) Open() {
- cp.mu.Lock()
- defer cp.mu.Unlock()
- if cp.pool != nil {
- panic(NewTabletError(vtrpcpb.ErrorCode_INTERNAL_ERROR, "rowcache is already open"))
- }
- if cp.rowCacheConfig.Binary == "" {
- panic(NewTabletError(vtrpcpb.ErrorCode_INTERNAL_ERROR, "rowcache binary not specified"))
- }
- cp.socket = generateFilename(cp.rowCacheConfig.Socket)
- cp.startCacheService()
- log.Infof("rowcache is enabled")
- f := func() (pools.Resource, error) {
- return cacheservice.Connect(cacheservice.Config{
- Address: cp.socket,
- Timeout: 10 * time.Second,
- })
- }
- cp.pool = pools.NewResourcePool(f, cp.capacity, cp.capacity, cp.idleTimeout)
- if cp.memcacheStats != nil {
- cp.memcacheStats.Open()
- }
-}
-
-// generateFilename generates a unique file name. It's convoluted.
-// There are race conditions when we have to come up with unique
-// names. So, this is a best effort.
-func generateFilename(hint string) string {
- dir, base := path.Split(hint)
- f, err := ioutil.TempFile(dir, base)
- if err != nil {
- panic(NewTabletError(vtrpcpb.ErrorCode_INTERNAL_ERROR, "error creating socket file: %v", err))
- }
- name := f.Name()
- err = f.Close()
- if err != nil {
- panic(NewTabletError(vtrpcpb.ErrorCode_INTERNAL_ERROR, "error closing socket file: %v", err))
- }
- err = os.Remove(name)
- if err != nil {
- panic(NewTabletError(vtrpcpb.ErrorCode_INTERNAL_ERROR, "error removing socket file: %v", err))
- }
- log.Infof("sock filename: %v", name)
- return name
-}
-
-func (cp *CachePool) startCacheService() {
- commandLine := cp.rowCacheConfig.GetSubprocessFlags(cp.socket)
- cp.cmd = exec.Command(commandLine[0], commandLine[1:]...)
- if err := cp.cmd.Start(); err != nil {
- panic(NewTabletError(vtrpcpb.ErrorCode_INTERNAL_ERROR, "can't start memcache: %v", err))
- }
- attempts := 0
- for {
- c, err := cacheservice.Connect(cacheservice.Config{
- Address: cp.socket,
- Timeout: 10 * time.Second,
- })
-
- if err != nil {
- attempts++
- if attempts >= 50 {
- cp.cmd.Process.Kill()
- // Avoid zombies
- go cp.cmd.Wait()
- // FIXME(sougou): Throw proper error if we can recover
- log.Fatalf("Can't connect to cache service: %s", cp.socket)
- }
- time.Sleep(100 * time.Millisecond)
- continue
- }
- if _, err = c.Set("health", 0, 0, []byte("ok")); err != nil {
- panic(NewTabletError(vtrpcpb.ErrorCode_INTERNAL_ERROR, "can't communicate with cache service: %v", err))
- }
- c.Close()
- break
- }
-}
-
-// Close closes the CachePool. It also shuts down memcache.
-// You can call Open again after Close.
-func (cp *CachePool) Close() {
- // Close the underlying pool first.
- // You cannot close the pool while holding the
- // lock because we have to still allow Put to
- // return outstanding connections, if any.
- pool := cp.getPool()
- if pool == nil {
- return
- }
- pool.Close()
-
- // No new operations will be allowed now.
- // Safe to cleanup.
- cp.mu.Lock()
- defer cp.mu.Unlock()
- if cp.pool == nil {
- return
- }
- if cp.memcacheStats != nil {
- cp.memcacheStats.Close()
- }
- cp.cmd.Process.Kill()
- // Avoid zombies
- go cp.cmd.Wait()
- _ = os.Remove(cp.socket)
- cp.socket = ""
- cp.pool = nil
-}
-
-// IsClosed returns true if CachePool is closed.
-func (cp *CachePool) IsClosed() bool {
- cp.mu.Lock()
- defer cp.mu.Unlock()
- return cp.pool == nil
-}
-
-func (cp *CachePool) getPool() *pools.ResourcePool {
- cp.mu.Lock()
- defer cp.mu.Unlock()
- return cp.pool
-}
-
-// Get returns a memcache connection from the pool.
-// You must call Put after Get.
-func (cp *CachePool) Get(ctx context.Context) cacheservice.CacheService {
- pool := cp.getPool()
- if pool == nil {
- panic(NewTabletError(vtrpcpb.ErrorCode_INTERNAL_ERROR, "cache pool is not open"))
- }
- r, err := pool.Get(ctx)
- if err != nil {
- panic(NewTabletErrorSQL(vtrpcpb.ErrorCode_INTERNAL_ERROR, err))
- }
- return r.(cacheservice.CacheService)
-}
-
-// Put returns the connection to the pool.
-func (cp *CachePool) Put(conn cacheservice.CacheService) {
- pool := cp.getPool()
- if pool == nil {
- return
- }
- if conn == nil {
- pool.Put(nil)
- } else {
- pool.Put(conn)
- }
-}
-
-// StatsJSON returns a JSON version of the CachePool stats.
-func (cp *CachePool) StatsJSON() string {
- pool := cp.getPool()
- if pool == nil {
- return "{}"
- }
- return pool.StatsJSON()
-}
-
-// Capacity returns the current capacity of the pool.
-func (cp *CachePool) Capacity() int64 {
- pool := cp.getPool()
- if pool == nil {
- return 0
- }
- return pool.Capacity()
-}
-
-// Available returns the number of available connections in the pool.
-func (cp *CachePool) Available() int64 {
- pool := cp.getPool()
- if pool == nil {
- return 0
- }
- return pool.Available()
-}
-
-// MaxCap returns the extent to which the pool capacity can be increased.
-func (cp *CachePool) MaxCap() int64 {
- pool := cp.getPool()
- if pool == nil {
- return 0
- }
- return pool.MaxCap()
-}
-
-// WaitCount returns the number of times we had to wait to get a connection
-// from the pool.
-func (cp *CachePool) WaitCount() int64 {
- pool := cp.getPool()
- if pool == nil {
- return 0
- }
- return pool.WaitCount()
-}
-
-// WaitTime returns the total amount of time spent waiting for a connection.
-func (cp *CachePool) WaitTime() time.Duration {
- pool := cp.getPool()
- if pool == nil {
- return 0
- }
- return pool.WaitTime()
-}
-
-// IdleTimeout returns the connection idle timeout.
-func (cp *CachePool) IdleTimeout() time.Duration {
- pool := cp.getPool()
- if pool == nil {
- return 0
- }
- return pool.IdleTimeout()
-}
-
-// ServeHTTP serves memcache stats as HTTP.
-func (cp *CachePool) ServeHTTP(response http.ResponseWriter, request *http.Request) {
- if err := acl.CheckAccessHTTP(request, acl.MONITORING); err != nil {
- acl.SendError(response, err)
- return
- }
- defer func() {
- if x := recover(); x != nil {
- response.Write(([]byte)(x.(error).Error()))
- }
- }()
- response.Header().Set("Content-Type", "text/plain")
- pool := cp.getPool()
- if pool == nil {
- response.Write(([]byte)("closed"))
- return
- }
- command := request.URL.Path[len(cp.statsURL):]
- if command == "stats" {
- command = ""
- }
- conn := cp.Get(context.Background())
- // This is not the same as defer cp.Put(conn)
- defer func() { cp.Put(conn) }()
- r, err := conn.Stats(command)
- if err != nil {
- conn.Close()
- conn = nil
- response.Write(([]byte)(err.Error()))
- } else {
- response.Write(r)
- }
-}
diff --git a/go/vt/tabletserver/cache_pool_test.go b/go/vt/tabletserver/cache_pool_test.go
deleted file mode 100644
index ae6798bcfb9..00000000000
--- a/go/vt/tabletserver/cache_pool_test.go
+++ /dev/null
@@ -1,275 +0,0 @@
-// Copyright 2015, Google Inc. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package tabletserver
-
-import (
- "fmt"
- "io/ioutil"
- "math/rand"
- "net/http"
- "net/http/httptest"
- "regexp"
- "testing"
- "time"
-
- "golang.org/x/net/context"
-
- "github.com/youtube/vitess/go/vt/tabletserver/fakecacheservice"
- "github.com/youtube/vitess/go/vt/vttest/fakesqldb"
-
- vtrpcpb "github.com/youtube/vitess/go/vt/proto/vtrpc"
-)
-
-func TestCachePoolWithEmptyBinary(t *testing.T) {
- fakecacheservice.Register()
- fakesqldb.Register()
- cachePool := newTestCachePool(RowCacheConfig{}, false)
- cachePool.Close()
-}
-
-func TestCachePool(t *testing.T) {
- fakecacheservice.Register()
- fakesqldb.Register()
- rowCacheConfig := RowCacheConfig{
- Binary: "ls",
- Connections: 100,
- }
- cachePool := newTestCachePool(rowCacheConfig, false)
- if !cachePool.IsClosed() {
- t.Fatalf("cache pool is not closed")
- }
- cachePool.Open()
- if cachePool.IsClosed() {
- t.Fatalf("cache pool is closed")
- }
- cachePool.Close()
- if !cachePool.IsClosed() {
- t.Fatalf("cache pool is not closed")
- }
-}
-
-func TestCachePoolOpenTwice(t *testing.T) {
- fakecacheservice.Register()
- fakesqldb.Register()
- rowCacheConfig := RowCacheConfig{
- Binary: "ls",
- Connections: 100,
- }
- cachePool := newTestCachePool(rowCacheConfig, false)
- cachePool.Open()
- defer cachePool.Close()
- defer func() {
- if e := recover(); e == nil {
- t.Fatalf("open an opened cache pool should panic")
- }
- }()
- cachePool.Open()
-}
-
-func TestCachePoolOpenWithEmptyBinary(t *testing.T) {
- fakecacheservice.Register()
- fakesqldb.Register()
- rowCacheConfig := RowCacheConfig{
- Binary: "ls",
- Connections: 100,
- }
- cachePool := newTestCachePool(rowCacheConfig, false)
- defer func() {
- if e := recover(); e == nil {
- t.Fatalf("open a cache pool with empty rowCacheConfig.Binary should panic")
- }
- }()
- cachePool.rowCacheConfig.Binary = ""
- cachePool.Open()
- cachePool.Close()
-}
-
-func TestCachePoolOpenWithInvalidBinary(t *testing.T) {
- fakecacheservice.Register()
- fakesqldb.Register()
- rowCacheConfig := RowCacheConfig{
- Binary: "invalid_binary",
- Connections: 100,
- }
- cachePool := newTestCachePool(rowCacheConfig, false)
- defer func() {
- if e := recover(); e == nil {
- t.Fatalf("open a cache pool with an invalid rowCacheConfig.Binary should panic")
- }
- }()
- cachePool.Open()
- cachePool.Close()
-}
-
-func TestCachePoolState(t *testing.T) {
- fakecacheservice.Register()
- fakesqldb.Register()
- rowCacheConfig := RowCacheConfig{
- Binary: "ls",
- Connections: 100,
- }
- cachePool := newTestCachePool(rowCacheConfig, true)
- idleTimeout := 1 * time.Second
- cachePool.idleTimeout = idleTimeout
- cachePool.Open()
- cachePool.memcacheStats.update()
- defer cachePool.Close()
- if cachePool.Available() <= 0 {
- t.Fatalf("cache pool should have connections available")
- }
- if cachePool.Capacity() <= 0 {
- t.Fatalf("cache pool should have positive capacity")
- }
- if cachePool.MaxCap() <= 0 {
- t.Fatalf("cache pool should have positive max cap")
- }
- if cachePool.WaitCount() > 0 {
- t.Fatalf("cache pool has never waited for a connection, WaitCount should return 0")
- }
- if cachePool.WaitTime() > 0 {
- t.Fatalf("cache pool has never waited for a connection, WaitTime should return 0")
- }
- if cachePool.IdleTimeout() != idleTimeout {
- t.Fatalf("cache pool's idle timeout does not match the specified one")
- }
- if len(cachePool.StatsJSON()) <= 0 {
- t.Fatalf("cache pool stats json should return non empty result")
- }
-}
-
-func TestCachePoolStateWithoutOpen(t *testing.T) {
- fakecacheservice.Register()
- fakesqldb.Register()
- rowCacheConfig := RowCacheConfig{
- Binary: "ls",
- Connections: 100,
- }
- cachePool := newTestCachePool(rowCacheConfig, false)
- idleTimeout := 1 * time.Second
- cachePool.idleTimeout = idleTimeout
- if cachePool.StatsJSON() != "{}" {
- t.Fatalf("cache pool StatsJSON() should return {}")
- }
- if cachePool.Capacity() != 0 {
- t.Fatalf("cache pool Capacity() should return 0")
- }
- if cachePool.Available() != 0 {
- t.Fatalf("cache pool Available() should return 0")
- }
- if cachePool.MaxCap() != 0 {
- t.Fatalf("cache pool MaxCap() should return 0")
- }
- if cachePool.WaitCount() != 0 {
- t.Fatalf("cache pool WaitCount() should return 0")
- }
- if cachePool.WaitTime() != 0 {
- t.Fatalf("cache pool WaitTime() should return 0")
- }
- if cachePool.IdleTimeout() != 0 {
- t.Fatalf("cache pool IdleTimeout() should return 0")
- }
- cachePool.Put(nil)
-}
-
-func TestCachePoolGetFailedBecauseCachePoolIsClosed(t *testing.T) {
- fakecacheservice.Register()
- fakesqldb.Register()
- rowCacheConfig := RowCacheConfig{
- Binary: "ls",
- Connections: 100,
- }
- cachePool := newTestCachePool(rowCacheConfig, false)
- idleTimeout := 1 * time.Second
- cachePool.idleTimeout = idleTimeout
- ctx := context.Background()
- defer func() {
- if err := recover(); err == nil {
- t.Fatalf("Get should fail because cache pool is closed")
- }
- }()
- cachePool.Get(ctx)
-}
-
-func TestCachePoolStatsURL(t *testing.T) {
- cache := fakecacheservice.Register()
- fakesqldb.Register()
- rowCacheConfig := RowCacheConfig{
- Binary: "ls",
- Connections: 100,
- }
- cachePool := newTestCachePool(rowCacheConfig, false)
- idleTimeout := 1 * time.Second
- cachePool.idleTimeout = idleTimeout
- cachePool.Open()
- request, _ := http.NewRequest("GET", fmt.Sprintf("%sstats", cachePool.statsURL), nil)
- response := httptest.NewRecorder()
- cachePool.ServeHTTP(response, request)
- // any memcache calls should fail
- cache.EnableCacheServiceError()
- response = httptest.NewRecorder()
- cachePool.ServeHTTP(response, request)
- cache.DisableCacheServiceError()
- cachePool.Close()
- response = httptest.NewRecorder()
- cachePool.ServeHTTP(response, request)
- body, _ := ioutil.ReadAll(response.Body)
- matcher := regexp.MustCompile("closed")
- if !matcher.Match(body) {
- t.Fatalf("stats page should contain 'closed', but got %s", string(body))
- }
-}
-
-func TestCachePoolMemcacheStatsFail(t *testing.T) {
- cache := fakecacheservice.Register()
- fakesqldb.Register()
- rowCacheConfig := RowCacheConfig{
- Binary: "ls",
- Connections: 100,
- }
- cachePool := newTestCachePool(rowCacheConfig, true)
- idleTimeout := 1 * time.Second
- cachePool.idleTimeout = idleTimeout
- cachePool.Open()
- defer cachePool.Close()
- memcacheStatsBefore := cachePool.queryServiceStats.InternalErrors.Counts()["MemcacheStats"]
- // any memcache calls should fail
- cache.EnableCacheServiceError()
- cachePool.memcacheStats.update()
- memcacheStatsAfter := cachePool.queryServiceStats.InternalErrors.Counts()["MemcacheStats"]
- if memcacheStatsAfter <= memcacheStatsBefore {
- t.Fatalf("memcache stats should cause an internal error")
- }
-}
-
-func TestCachePoolFailToStartBecauseCacheServiceWasDown(t *testing.T) {
- cache := fakecacheservice.Register()
- fakesqldb.Register()
- testUtils := &testUtils{}
- rowCacheConfig := RowCacheConfig{
- Binary: "ls",
- Connections: 100,
- }
- cachePool := newTestCachePool(rowCacheConfig, false)
- idleTimeout := 1 * time.Second
- cachePool.idleTimeout = idleTimeout
- // any memcache calls should fail
- cache.EnableCacheServiceError()
- defer testUtils.checkTabletErrorWithRecover(t, vtrpcpb.ErrorCode_INTERNAL_ERROR, "can't communicate with cache service")
- cachePool.Open()
-}
-
-func newTestCachePool(rowcacheConfig RowCacheConfig, enablePublishStats bool) *CachePool {
- randID := rand.Int63()
- name := fmt.Sprintf("TestCachePool-%d-", randID)
- statsURL := fmt.Sprintf("/debug/cache-%d/", randID)
- queryServiceStats := NewQueryServiceStats(name, enablePublishStats)
- return NewCachePool(
- name,
- rowcacheConfig,
- 1*time.Second,
- statsURL,
- enablePublishStats,
- queryServiceStats)
-}
diff --git a/go/vt/tabletserver/codex.go b/go/vt/tabletserver/codex.go
index 33184cc6b2f..af0ef1e0822 100644
--- a/go/vt/tabletserver/codex.go
+++ b/go/vt/tabletserver/codex.go
@@ -178,51 +178,6 @@ func validateValue(col *schema.TableColumn, value sqltypes.Value) error {
return nil
}
-// getLimit resolves the rowcount or offset of the limit clause value.
-// It returns -1 if it's not set.
-func getLimit(limit interface{}, bv map[string]interface{}) (int64, error) {
- switch lim := limit.(type) {
- case string:
- lookup, ok := bv[lim[1:]]
- if !ok {
- return -1, NewTabletError(vtrpcpb.ErrorCode_BAD_INPUT, "missing bind var %s", lim)
- }
- var newlim int64
- switch l := lookup.(type) {
- case int64:
- newlim = l
- case int32:
- newlim = int64(l)
- case int:
- newlim = int64(l)
- default:
- return -1, NewTabletError(vtrpcpb.ErrorCode_BAD_INPUT, "want number type for %s, got %T", lim, lookup)
- }
- if newlim < 0 {
- return -1, NewTabletError(vtrpcpb.ErrorCode_BAD_INPUT, "negative limit %d", newlim)
- }
- return newlim, nil
- case int64:
- return lim, nil
- default:
- return -1, nil
- }
-}
-
-func buildKey(row []sqltypes.Value) (key string) {
- buf := &bytes.Buffer{}
- for i, pkValue := range row {
- if pkValue.IsNull() {
- return ""
- }
- pkValue.EncodeASCII(buf)
- if i != len(row)-1 {
- buf.WriteByte('.')
- }
- }
- return buf.String()
-}
-
func buildStreamComment(tableInfo *TableInfo, pkValueList [][]sqltypes.Value, secondaryList [][]sqltypes.Value) []byte {
buf := bytes.NewBuffer(make([]byte, 0, 256))
fmt.Fprintf(buf, " /* _stream %s (", tableInfo.Name)
@@ -249,16 +204,6 @@ func buildPKValueList(buf *bytes.Buffer, tableInfo *TableInfo, pkValueList [][]s
}
}
-func applyFilter(columnNumbers []int, input []sqltypes.Value) (output []sqltypes.Value) {
- output = make([]sqltypes.Value, len(columnNumbers))
- for colIndex, colPointer := range columnNumbers {
- if colPointer >= 0 {
- output[colIndex] = input[colPointer]
- }
- }
- return output
-}
-
func applyFilterWithPKDefaults(tableInfo *TableInfo, columnNumbers []int, input []sqltypes.Value) (output []sqltypes.Value) {
output = make([]sqltypes.Value, len(columnNumbers))
for colIndex, colPointer := range columnNumbers {
diff --git a/go/vt/tabletserver/codex_test.go b/go/vt/tabletserver/codex_test.go
index e6993f0f900..aae27bd3b9b 100644
--- a/go/vt/tabletserver/codex_test.go
+++ b/go/vt/tabletserver/codex_test.go
@@ -342,78 +342,6 @@ func TestCodexValidateRow(t *testing.T) {
testUtils.checkTabletError(t, err, vtrpcpb.ErrorCode_BAD_INPUT, "type mismatch")
}
-func TestCodexGetLimit(t *testing.T) {
- bv := map[string]interface{}{
- "negative": -1,
- "int64": int64(1),
- "int32": int32(1),
- "int": int(1),
- "uint": uint(1),
- }
- testUtils := newTestUtils()
- _, err := getLimit(":unknown", bv)
- if err == nil {
- t.Fatal("got nil, want error: missing bind var")
- }
- testUtils.checkTabletError(t, err, vtrpcpb.ErrorCode_BAD_INPUT, "missing bind var")
- result, err := getLimit(int64(1), bv)
- if err != nil {
- t.Fatalf("getLimit(1, bv) = %v, want nil", err)
- }
- if result != 1 {
- t.Fatalf("got %d, want 1", result)
- }
- result, err = getLimit(nil, bv)
- if err != nil {
- t.Fatalf("getLimit(nil, bv) = %v, want nil", err)
- }
- if result != -1 {
- t.Fatalf("got %d, want -1", result)
- }
-
- result, err = getLimit(":negative", bv)
- if err == nil {
- t.Fatalf("getLimit(':negative', bv) should return an error")
- }
- want := "error: negative limit -1"
- if err.Error() != want {
- t.Fatalf("got %s, want %s", err.Error(), want)
- }
- if result, _ := getLimit(":int64", bv); result != 1 {
- t.Fatalf("got %d, want 1", result)
- }
- if result, _ := getLimit(":int32", bv); result != 1 {
- t.Fatalf("got %d, want 1", result)
- }
- if result, _ := getLimit(":int", bv); result != 1 {
- t.Fatalf("got %d, want 1", result)
- }
-
- _, err = getLimit(":uint", bv)
- if err == nil {
- t.Fatalf("getLimit(':uint', bv) should return an error")
- }
- want = "error: want number type for :uint, got uint"
- if err.Error() != want {
- t.Fatalf("got %s, want %s", err.Error(), want)
- }
-}
-
-func TestCodexBuildKey(t *testing.T) {
- testUtils := newTestUtils()
- newKey := buildKey([]sqltypes.Value{
- sqltypes.MakeTrusted(sqltypes.Int64, []byte("1")),
- sqltypes.MakeTrusted(sqltypes.Int64, []byte("2")),
- })
- testUtils.checkEqual(t, "1.2", newKey)
-
- newKey = buildKey([]sqltypes.Value{
- sqltypes.MakeString([]byte("a")),
- sqltypes.NULL,
- })
- testUtils.checkEqual(t, "", newKey)
-}
-
func TestCodexApplyFilterWithPKDefaults(t *testing.T) {
testUtils := newTestUtils()
tableInfo := createTableInfo("Table",
diff --git a/go/vt/tabletserver/config.go b/go/vt/tabletserver/config.go
index 05d3026647c..4014e4c986e 100644
--- a/go/vt/tabletserver/config.go
+++ b/go/vt/tabletserver/config.go
@@ -8,7 +8,6 @@ import (
"flag"
"fmt"
"net/url"
- "strconv"
"github.com/youtube/vitess/go/streamlog"
"github.com/youtube/vitess/go/vt/dbconfigs"
@@ -37,7 +36,6 @@ func init() {
flag.Float64Var(&qsConfig.QueryTimeout, "queryserver-config-query-timeout", DefaultQsConfig.QueryTimeout, "query server query timeout (in seconds), this is the query timeout in vttablet side. If a query takes more than this timeout, it will be killed.")
flag.Float64Var(&qsConfig.TxPoolTimeout, "queryserver-config-txpool-timeout", DefaultQsConfig.TxPoolTimeout, "query server transaction pool timeout, it is how long vttablet waits if tx pool is full")
flag.Float64Var(&qsConfig.IdleTimeout, "queryserver-config-idle-timeout", DefaultQsConfig.IdleTimeout, "query server idle timeout (in seconds), vttablet manages various mysql connection pools. This config means if a connection has not been used in given idle timeout, this connection will be removed from pool. This effectively manages number of connection objects and optimize the pool performance.")
- flag.Float64Var(&qsConfig.SpotCheckRatio, "queryserver-config-spot-check-ratio", DefaultQsConfig.SpotCheckRatio, "query server rowcache spot check frequency (in [0, 1]), if rowcache is enabled, this value determines how often a row retrieved from the rowcache is spot-checked against MySQL.")
flag.BoolVar(&qsConfig.StrictMode, "queryserver-config-strict-mode", DefaultQsConfig.StrictMode, "allow only predictable DMLs and enforces MySQL's STRICT_TRANS_TABLES")
// tableacl related configurations.
flag.BoolVar(&qsConfig.StrictTableAcl, "queryserver-config-strict-table-acl", DefaultQsConfig.StrictTableAcl, "only allow queries that pass table acl checks")
@@ -45,14 +43,6 @@ func init() {
flag.StringVar(&qsConfig.TableAclExemptACL, "queryserver-config-acl-exempt-acl", DefaultQsConfig.TableAclExemptACL, "an acl that exempt from table acl checking (this acl is free to access any vitess tables).")
flag.BoolVar(&qsConfig.TerseErrors, "queryserver-config-terse-errors", DefaultQsConfig.TerseErrors, "prevent bind vars from escaping in returned errors")
flag.BoolVar(&qsConfig.EnablePublishStats, "queryserver-config-enable-publish-stats", DefaultQsConfig.EnablePublishStats, "set this flag to true makes queryservice publish monitoring stats")
- flag.BoolVar(&qsConfig.RowCache.Enabled, "enable-rowcache", DefaultQsConfig.RowCache.Enabled, "set this flag to enable rowcache. The rest of the rowcache parameters will also need to be accordingly specified.")
- flag.StringVar(&qsConfig.RowCache.Binary, "rowcache-bin", DefaultQsConfig.RowCache.Binary, "rowcache binary file, vttablet launches a memcached if rowcache is enabled. This config specifies the location of the memcache binary.")
- flag.IntVar(&qsConfig.RowCache.Memory, "rowcache-memory", DefaultQsConfig.RowCache.Memory, "rowcache max memory usage in MB")
- flag.StringVar(&qsConfig.RowCache.Socket, "rowcache-socket", DefaultQsConfig.RowCache.Socket, "socket filename hint: a unique filename will be generated based on this input")
- flag.IntVar(&qsConfig.RowCache.Connections, "rowcache-connections", DefaultQsConfig.RowCache.Connections, "rowcache max simultaneous connections")
- flag.IntVar(&qsConfig.RowCache.Threads, "rowcache-threads", DefaultQsConfig.RowCache.Threads, "rowcache number of threads")
- flag.BoolVar(&qsConfig.RowCache.LockPaged, "rowcache-lock-paged", DefaultQsConfig.RowCache.LockPaged, "whether rowcache locks down paged memory")
- flag.StringVar(&qsConfig.RowCache.StatsPrefix, "rowcache-stats-prefix", DefaultQsConfig.RowCache.StatsPrefix, "rowcache stats prefix, rowcache will export various metrics and this config specifies the metric prefix")
flag.StringVar(&qsConfig.StatsPrefix, "stats-prefix", DefaultQsConfig.StatsPrefix, "prefix for variable names exported via expvar")
flag.StringVar(&qsConfig.DebugURLPrefix, "debug-url-prefix", DefaultQsConfig.DebugURLPrefix, "debug url prefix, vttablet will report various system debug pages and this config controls the prefix of these debug urls")
flag.StringVar(&qsConfig.PoolNamePrefix, "pool-name-prefix", DefaultQsConfig.PoolNamePrefix, "pool name prefix, vttablet has several pools and each of them has a name. This config specifies the prefix of these pool names")
@@ -65,42 +55,6 @@ func Init() {
TxLogger.ServeLogs(*txLogHandler, buildFmter(TxLogger))
}
-// RowCacheConfig encapsulates the configuration for RowCache
-type RowCacheConfig struct {
- Enabled bool
- Binary string
- Memory int
- Socket string
- Connections int
- Threads int
- LockPaged bool
- StatsPrefix string
-}
-
-// GetSubprocessFlags returns the flags to use to call memcached
-func (c *RowCacheConfig) GetSubprocessFlags(socket string) []string {
- cmd := []string{}
- if c.Binary == "" {
- return cmd
- }
- cmd = append(cmd, c.Binary)
- cmd = append(cmd, "-s", socket)
- if c.Memory > 0 {
- // memory is given in bytes and rowcache expects in MBs
- cmd = append(cmd, "-m", strconv.Itoa(c.Memory/1000000))
- }
- if c.Connections > 0 {
- cmd = append(cmd, "-c", strconv.Itoa(c.Connections))
- }
- if c.Threads > 0 {
- cmd = append(cmd, "-t", strconv.Itoa(c.Threads))
- }
- if c.LockPaged {
- cmd = append(cmd, "-k")
- }
- return cmd
-}
-
// Config contains all the configuration for query service
type Config struct {
PoolSize int
@@ -115,8 +69,6 @@ type Config struct {
QueryTimeout float64
TxPoolTimeout float64
IdleTimeout float64
- RowCache RowCacheConfig
- SpotCheckRatio float64
StrictMode bool
StrictTableAcl bool
TerseErrors bool
@@ -149,8 +101,6 @@ var DefaultQsConfig = Config{
TxPoolTimeout: 1,
IdleTimeout: 30 * 60,
StreamBufferSize: 32 * 1024,
- RowCache: RowCacheConfig{Memory: -1, Connections: -1, Threads: -1},
- SpotCheckRatio: 0,
StrictMode: true,
StrictTableAcl: false,
TerseErrors: false,
@@ -193,7 +143,7 @@ type Controller interface {
AddStatusPart()
// InitDBConfig sets up the db config vars.
- InitDBConfig(querypb.Target, dbconfigs.DBConfigs, []SchemaOverride, mysqlctl.MysqlDaemon) error
+ InitDBConfig(querypb.Target, dbconfigs.DBConfigs, mysqlctl.MysqlDaemon) error
// SetServingType transitions the query service to the required serving type.
// Returns true if the state of QueryService or the tablet type changed.
diff --git a/go/vt/tabletserver/endtoend/cache_case_test.go b/go/vt/tabletserver/endtoend/cache_case_test.go
deleted file mode 100644
index 8ac96cce506..00000000000
--- a/go/vt/tabletserver/endtoend/cache_case_test.go
+++ /dev/null
@@ -1,670 +0,0 @@
-// Copyright 2015, Google Inc. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package endtoend
-
-import (
- "testing"
-
- "github.com/youtube/vitess/go/vt/tabletserver/endtoend/framework"
-)
-
-// TestCacheCases1 covers cases for vitess_cached1.
-func TestCacheCases1(t *testing.T) {
- client := framework.NewClient()
-
- testCases := []framework.Testable{
- framework.TestQuery("alter table vitess_cached1 comment 'new'"),
- // (1) will be in cache after this.
- &framework.TestCase{
- Name: "PK_IN (empty cache)",
- Query: "select * from vitess_cached1 where eid = 1",
- Result: [][]string{
- {"1", "a", "abcd"},
- },
- Rewritten: []string{
- "select * from vitess_cached1 where 1 != 1",
- "select eid, name, foo from vitess_cached1 where eid in (1)",
- },
- RowsAffected: 1,
- Plan: "PK_IN",
- Table: "vitess_cached1",
- Misses: 1,
- },
- // (1)
- &framework.TestCase{
- Name: "PK_IN, use cache",
- Query: "select * from vitess_cached1 where eid = 1",
- Result: [][]string{
- {"1", "a", "abcd"},
- },
- RowsAffected: 1,
- Plan: "PK_IN",
- Table: "vitess_cached1",
- Hits: 1,
- },
- // (1)
- &framework.TestCase{
- Name: "PK_IN int bind var",
- Query: "select * from vitess_cached1 where eid = :eid",
- BindVars: map[string]interface{}{
- "eid": 1,
- },
- Result: [][]string{
- {"1", "a", "abcd"},
- },
- RowsAffected: 1,
- Plan: "PK_IN",
- Table: "vitess_cached1",
- Hits: 1,
- },
- // (1)
- &framework.TestCase{
- Name: "PK_IN string bind var",
- Query: "select * from vitess_cached1 where eid = :eid",
- BindVars: map[string]interface{}{
- "eid": "1",
- },
- Result: [][]string{
- {"1", "a", "abcd"},
- },
- RowsAffected: 1,
- Plan: "PK_IN",
- Table: "vitess_cached1",
- Hits: 1,
- },
- // (1, 3)
- &framework.TestCase{
- Name: "PK_IN (empty cache)",
- Query: "select * from vitess_cached1 where eid in (1, 3, 6)",
- Result: [][]string{
- {"1", "a", "abcd"},
- {"3", "c", "abcd"},
- },
- Rewritten: []string{
- "select * from vitess_cached1 where 1 != 1",
- "select eid, name, foo from vitess_cached1 where eid in (3, 6)",
- },
- RowsAffected: 2,
- Plan: "PK_IN",
- Table: "vitess_cached1",
- Hits: 1,
- Misses: 1,
- Absent: 1,
- },
- // (1, 3)
- &framework.TestCase{
- Name: "PK_IN limit 0",
- Query: "select * from vitess_cached1 where eid in (1, 3, 6) limit 0",
- Rewritten: []string{
- "select * from vitess_cached1 where 1 != 1",
- },
- Plan: "PK_IN",
- Table: "vitess_cached1",
- },
- // (1, 3)
- &framework.TestCase{
- Name: "PK_IN limit 1",
- Query: "select * from vitess_cached1 where eid in (1, 3, 6) limit 1",
- Result: [][]string{
- {"1", "a", "abcd"},
- },
- Rewritten: []string{
- "select * from vitess_cached1 where 1 != 1",
- "select eid, name, foo from vitess_cached1 where eid in (6)",
- },
- RowsAffected: 1,
- Plan: "PK_IN",
- Table: "vitess_cached1",
- Hits: 2,
- Absent: 1,
- },
- // (1, 3)
- &framework.TestCase{
- Name: "PK_IN limit :a",
- Query: "select * from vitess_cached1 where eid in (1, 3, 6) limit :a",
- BindVars: map[string]interface{}{
- "a": 1,
- },
- Result: [][]string{
- {"1", "a", "abcd"},
- },
- Rewritten: []string{
- "select * from vitess_cached1 where 1 != 1",
- "select eid, name, foo from vitess_cached1 where eid in (6)",
- },
- RowsAffected: 1,
- Plan: "PK_IN",
- Table: "vitess_cached1",
- Hits: 2,
- Absent: 1,
- },
- // (1, 2, 3)
- &framework.TestCase{
- Name: "SELECT_SUBQUERY (1, 2)",
- Query: "select * from vitess_cached1 where name = 'a'",
- Result: [][]string{
- {"1", "a", "abcd"},
- {"2", "a", "abcd"},
- },
- Rewritten: []string{
- "select * from vitess_cached1 where 1 != 1",
- "select eid from vitess_cached1 use index (aname1) where name = 'a' limit 10001",
- "select eid, name, foo from vitess_cached1 where eid in (2)",
- },
- RowsAffected: 2,
- Table: "vitess_cached1",
- Hits: 1,
- Misses: 1,
- },
- // (1, 2, 3)
- &framework.TestCase{
- Name: "covering index",
- Query: "select eid, name from vitess_cached1 where name = 'a'",
- Result: [][]string{
- {"1", "a"},
- {"2", "a"},
- },
- Rewritten: []string{
- "select eid, name from vitess_cached1 where 1 != 1",
- "select eid, name from vitess_cached1 where name = 'a' limit 10001",
- },
- RowsAffected: 2,
- Plan: "PASS_SELECT",
- Table: "vitess_cached1",
- },
- // (1, 2, 3)
- &framework.TestCase{
- Name: "SELECT_SUBQUERY (1, 2)",
- Query: "select * from vitess_cached1 where name = 'a'",
- Result: [][]string{
- {"1", "a", "abcd"},
- {"2", "a", "abcd"},
- },
- Rewritten: []string{
- "select eid from vitess_cached1 use index (aname1) where name = 'a' limit 10001",
- },
- RowsAffected: 2,
- Table: "vitess_cached1",
- Hits: 2,
- },
- // (1, 2, 3, 4, 5)
- &framework.TestCase{
- Name: "SELECT_SUBQUERY (4, 5)",
- Query: "select * from vitess_cached1 where name between 'd' and 'e'",
- Result: [][]string{
- {"4", "d", "abcd"},
- {"5", "e", "efgh"},
- },
- Rewritten: []string{
- "select * from vitess_cached1 where 1 != 1",
- "select eid from vitess_cached1 use index (aname1) where name between 'd' and 'e' limit 10001",
- "select eid, name, foo from vitess_cached1 where eid in (4, 5)",
- },
- RowsAffected: 2,
- Plan: "SELECT_SUBQUERY",
- Table: "vitess_cached1",
- Misses: 2,
- },
- // (1, 2, 3, 4, 5)
- &framework.TestCase{
- Name: "PASS_SELECT",
- Query: "select * from vitess_cached1 where foo='abcd'",
- Result: [][]string{
- {"1", "a", "abcd"},
- {"2", "a", "abcd"},
- {"3", "c", "abcd"},
- {"4", "d", "abcd"},
- },
- Rewritten: []string{
- "select * from vitess_cached1 where 1 != 1",
- "select * from vitess_cached1 where foo = 'abcd' limit 10001",
- },
- RowsAffected: 4,
- Plan: "PASS_SELECT",
- Table: "vitess_cached1",
- },
- }
- for _, tcase := range testCases {
- if err := tcase.Test("", client); err != nil {
- t.Error(err)
- }
- }
-}
-
-// TestCacheCases2 covers cases for vitess_cached2.
-func TestCacheCases2(t *testing.T) {
- client := framework.NewClient()
-
- testCases := []framework.Testable{
- framework.TestQuery("alter table vitess_cached2 comment 'new'"),
- &framework.TestCase{
- Name: "PK_IN (null key)",
- Query: "select * from vitess_cached2 where eid = 2 and bid = :bid",
- BindVars: map[string]interface{}{
- "bid": nil,
- },
- Rewritten: []string{
- "select * from vitess_cached2 where 1 != 1",
- "select eid, bid, name, foo from vitess_cached2 where (eid = 2 and bid = null)",
- },
- Plan: "PK_IN",
- Table: "vitess_cached2",
- Absent: 1,
- },
- // (2.foo) is in cache
- &framework.TestCase{
- Name: "PK_IN (empty cache)",
- Query: "select * from vitess_cached2 where eid = 2 and bid = 'foo'",
- Result: [][]string{
- {"2", "foo", "abcd2", "efgh"},
- },
- Rewritten: []string{
- "select * from vitess_cached2 where 1 != 1",
- "select eid, bid, name, foo from vitess_cached2 where (eid = 2 and bid = 'foo')",
- },
- RowsAffected: 1,
- Plan: "PK_IN",
- Table: "vitess_cached2",
- Misses: 1,
- },
- // (2.foo)
- &framework.TestCase{
- Name: "PK_IN, use cache",
- Query: "select bid, eid, name, foo from vitess_cached2 where eid = 2 and bid = 'foo'",
- Result: [][]string{
- {"foo", "2", "abcd2", "efgh"},
- },
- Rewritten: []string{
- "select bid, eid, name, foo from vitess_cached2 where 1 != 1",
- },
- RowsAffected: 1,
- Plan: "PK_IN",
- Table: "vitess_cached2",
- Hits: 1,
- },
- // (2.foo)
- &framework.TestCase{
- Name: "PK_IN, absent",
- Query: "select bid, eid, name, foo from vitess_cached2 where eid = 3 and bid = 'foo'",
- Rewritten: []string{
- "select bid, eid, name, foo from vitess_cached2 where 1 != 1",
- "select eid, bid, name, foo from vitess_cached2 where (eid = 3 and bid = 'foo')",
- },
- Plan: "PK_IN",
- Table: "vitess_cached2",
- Absent: 1,
- },
- // (1.foo, 2.foo)
- &framework.TestCase{
- Name: "out of order columns list",
- Query: "select bid, eid from vitess_cached2 where eid = 1 and bid = 'foo'",
- Result: [][]string{
- {"foo", "1"},
- },
- Rewritten: []string{
- "select bid, eid from vitess_cached2 where 1 != 1",
- "select eid, bid, name, foo from vitess_cached2 where (eid = 1 and bid = 'foo')",
- },
- RowsAffected: 1,
- Table: "vitess_cached2",
- Misses: 1,
- },
- // (1.foo, 2.foo)
- &framework.TestCase{
- Name: "out of order columns list, use cache",
- Query: "select bid, eid from vitess_cached2 where eid = 1 and bid = 'foo'",
- Result: [][]string{
- {"foo", "1"},
- },
- RowsAffected: 1,
- Table: "vitess_cached2",
- Hits: 1,
- },
- // (1.foo, 1.bar, 2.foo)
- &framework.TestCase{
- Name: "pk_in for composite pk table, two fetches from db (absent)",
- Query: "select eid, bid, name, foo from vitess_cached2 where eid = 1 and bid in('absent1', 'absent2')",
- Rewritten: []string{
- "select eid, bid, name, foo from vitess_cached2 where 1 != 1",
- "select eid, bid, name, foo from vitess_cached2 where (eid = 1 and bid = 'absent1') or (eid = 1 and bid = 'absent2')",
- },
- Plan: "PK_IN",
- Table: "vitess_cached2",
- Absent: 2,
- },
- // (1.foo, 1.bar, 2.foo)
- &framework.TestCase{
- Name: "pk_in for composite pk table, 1 fetch from db",
- Query: "select eid, bid, name, foo from vitess_cached2 where eid = 1 and bid in('foo', 'bar')",
- Result: [][]string{
- {"1", "foo", "abcd1", "efgh"},
- {"1", "bar", "abcd1", "efgh"},
- },
- Rewritten: []string{
- "select eid, bid, name, foo from vitess_cached2 where 1 != 1",
- "select eid, bid, name, foo from vitess_cached2 where (eid = 1 and bid = 'bar')",
- },
- RowsAffected: 2,
- Plan: "PK_IN",
- Table: "vitess_cached2",
- Hits: 1,
- Misses: 1,
- },
- // (1.foo, 1.bar, 2.foo)
- &framework.TestCase{
- Name: "pk_in for composite pk table, 0 fetch from db",
- Query: "select eid, bid, name, foo from vitess_cached2 where eid = 1 and bid in('foo', 'bar')",
- Result: [][]string{
- {"1", "foo", "abcd1", "efgh"},
- {"1", "bar", "abcd1", "efgh"},
- },
- RowsAffected: 2,
- Plan: "PK_IN",
- Table: "vitess_cached2",
- Hits: 2,
- },
- // (1.foo, 1.bar, 2.foo, 2.bar)
- &framework.TestCase{
- Name: "select_subquery for composite pk table, 1 fetch from db",
- Query: "select eid, bid, name, foo from vitess_cached2 where eid = 2 and name='abcd2'",
- Result: [][]string{
- {"2", "foo", "abcd2", "efgh"},
- {"2", "bar", "abcd2", "efgh"},
- },
- Rewritten: []string{
- "select eid, bid, name, foo from vitess_cached2 where 1 != 1",
- "select eid, bid from vitess_cached2 use index (aname2) where eid = 2 and name = 'abcd2' limit 10001",
- "select eid, bid, name, foo from vitess_cached2 where (eid = 2 and bid = 'bar')",
- },
- RowsAffected: 2,
- Plan: "SELECT_SUBQUERY",
- Table: "vitess_cached2",
- Hits: 1,
- Misses: 1,
- },
- // (1.foo, 1.bar, 2.foo, 2.bar)
- &framework.TestCase{
- Name: "verify 1.bar is in cache",
- Query: "select bid, eid from vitess_cached2 where eid = 1 and bid = 'bar'",
- Result: [][]string{
- {"bar", "1"},
- },
- Rewritten: []string{
- "select bid, eid from vitess_cached2 where 1 != 1",
- },
- RowsAffected: 1,
- Table: "vitess_cached2",
- Hits: 1,
- },
- // (1.foo, 1.bar, 2.foo, 2.bar)
- &framework.MultiCase{
- Name: "update",
- Cases: []framework.Testable{
- framework.TestQuery("begin"),
- framework.TestQuery("update vitess_cached2 set foo='fghi' where bid = 'bar'"),
- &framework.TestCase{
- Query: "commit",
- Table: "vitess_cached2",
- Invalidations: 2,
- },
- &framework.TestCase{
- Query: "select * from vitess_cached2 where eid = 1 and bid = 'bar'",
- Result: [][]string{
- {"1", "bar", "abcd1", "fghi"},
- },
- Rewritten: []string{
- "select * from vitess_cached2 where 1 != 1",
- "select eid, bid, name, foo from vitess_cached2 where (eid = 1 and bid = 'bar')",
- },
- RowsAffected: 1,
- Table: "vitess_cached2",
- Misses: 1,
- },
- },
- },
- // (1.foo, 1.bar, 2.foo, 2.bar)
- &framework.MultiCase{
- Name: "this will not invalidate the cache",
- Cases: []framework.Testable{
- framework.TestQuery("begin"),
- framework.TestQuery("update vitess_cached2 set foo='fghi' where bid = 'bar'"),
- framework.TestQuery("rollback"),
- &framework.TestCase{
- Query: "select * from vitess_cached2 where eid = 1 and bid = 'bar'",
- Result: [][]string{
- {"1", "bar", "abcd1", "fghi"},
- },
- RowsAffected: 1,
- Table: "vitess_cached2",
- Hits: 1,
- },
- },
- },
- // (1.foo, 1.bar, 2.foo, 2.bar)
- &framework.MultiCase{
- Name: "upsert should invalidate rowcache",
- Cases: []framework.Testable{
- &framework.TestCase{
- Query: "select * from vitess_cached2 where eid = 1 and bid = 'bar'",
- Result: [][]string{
- {"1", "bar", "abcd1", "fghi"},
- },
- RowsAffected: 1,
- Table: "vitess_cached2",
- Hits: 1,
- },
- framework.TestQuery("begin"),
- &framework.TestCase{
- Query: "insert into vitess_cached2 values(1, 'bar', 'abcd1', 'fghi') on duplicate key update foo='fghi'",
- Rewritten: []string{
- "insert into vitess_cached2 values (1, 'bar', 'abcd1', 'fghi') /* _stream vitess_cached2 (eid bid ) (1 'YmFy' )",
- "update vitess_cached2 set foo = 'fghi' where (eid = 1 and bid = 'bar') /* _stream vitess_cached2 (eid bid ) (1 'YmFy' )",
- },
- Table: "vitess_cached2",
- },
- framework.TestQuery("commit"),
- &framework.TestCase{
- Query: "select * from vitess_cached2 where eid = 1 and bid = 'bar'",
- Result: [][]string{
- {"1", "bar", "abcd1", "fghi"},
- },
- Rewritten: []string{
- "select eid, bid, name, foo from vitess_cached2 where (eid = 1 and bid = 'bar')",
- },
- RowsAffected: 1,
- Table: "vitess_cached2",
- Misses: 1,
- },
- },
- },
- // (1.foo, 2.foo, 2.bar)
- &framework.MultiCase{
- Name: "delete",
- Cases: []framework.Testable{
- framework.TestQuery("begin"),
- framework.TestQuery("delete from vitess_cached2 where eid = 1 and bid = 'bar'"),
- &framework.TestCase{
- Query: "commit",
- Table: "vitess_cached2",
- Invalidations: 1,
- },
- &framework.TestCase{
- Query: "select * from vitess_cached2 where eid = 1 and bid = 'bar'",
- Rewritten: []string{
- "select eid, bid, name, foo from vitess_cached2 where (eid = 1 and bid = 'bar')",
- },
- Table: "vitess_cached2",
- Absent: 1,
- },
- framework.TestQuery("begin"),
- framework.TestQuery("insert into vitess_cached2(eid, bid, name, foo) values (1, 'bar', 'abcd1', 'efgh')"),
- &framework.TestCase{
- Query: "commit",
- Table: "vitess_cached2",
- },
- },
- },
- // (1.foo, 2.foo, 2.bar)
- &framework.TestCase{
- Name: "Verify 1.foo is in cache",
- Query: "select * from vitess_cached2 where eid = 1 and bid = 'foo'",
- Result: [][]string{
- {"1", "foo", "abcd1", "efgh"},
- },
- Rewritten: []string{
- "select * from vitess_cached2 where 1 != 1",
- },
- RowsAffected: 1,
- Table: "vitess_cached2",
- Hits: 1,
- },
- // DDL
- framework.TestQuery("alter table vitess_cached2 comment 'test'"),
- // (1.foo)
- &framework.TestCase{
- Name: "Verify cache is empty after DDL",
- Query: "select * from vitess_cached2 where eid = 1 and bid = 'foo'",
- Result: [][]string{
- {"1", "foo", "abcd1", "efgh"},
- },
- Rewritten: []string{
- "select * from vitess_cached2 where 1 != 1",
- "select eid, bid, name, foo from vitess_cached2 where (eid = 1 and bid = 'foo')",
- },
- RowsAffected: 1,
- Table: "vitess_cached2",
- Misses: 1,
- },
- // (1.foo)
- &framework.TestCase{
- Name: "Verify row is cached",
- Query: "select * from vitess_cached2 where eid = 1 and bid = 'foo'",
- Result: [][]string{
- {"1", "foo", "abcd1", "efgh"},
- },
- RowsAffected: 1,
- Table: "vitess_cached2",
- Hits: 1,
- },
- }
- for _, tcase := range testCases {
- if err := tcase.Test("", client); err != nil {
- t.Error(err)
- }
- }
-}
-
-func TestCacheCasesOverrides(t *testing.T) {
- client := framework.NewClient()
-
- testCases := []framework.Testable{
- &framework.TestCase{
- Name: "select from view (cache miss)",
- Query: "select * from vitess_view where key2 = 1",
- Result: [][]string{
- {"1", "10", "1", "3"},
- },
- Rewritten: []string{
- "select * from vitess_view where 1 != 1",
- "select key2, key1, data1, data2 from vitess_view where key2 in (1)",
- },
- RowsAffected: 1,
- Plan: "PK_IN",
- Table: "vitess_view",
- Misses: 1,
- },
- &framework.TestCase{
- Name: "select from view (cache hit)",
- Query: "select * from vitess_view where key2 = 1",
- Result: [][]string{
- {"1", "10", "1", "3"},
- },
- Rewritten: []string{},
- RowsAffected: 1,
- Plan: "PK_IN",
- Table: "vitess_view",
- Hits: 1,
- },
- &framework.TestCase{
- Name: "update part1 table of view",
- Query: "update vitess_part1 set data1 = 2 where key2 = 1",
- Rewritten: []string{
- "begin",
- "update vitess_part1 set data1 = 2 where key2 in (1) /* _stream vitess_part1 (key2 ) (1 )",
- "commit",
- },
- RowsAffected: 1,
- Plan: "DML_PK",
- },
- &framework.TestCase{
- Name: "verify cache got invalidated",
- Query: "select * from vitess_view where key2 = 1",
- Result: [][]string{
- {"1", "10", "2", "3"},
- },
- Rewritten: []string{
- "select key2, key1, data1, data2 from vitess_view where key2 in (1)",
- },
- RowsAffected: 1,
- Plan: "PK_IN",
- Table: "vitess_view",
- Misses: 1,
- },
- &framework.TestCase{
- Name: "verify cache got reloaded",
- Query: "select * from vitess_view where key2 = 1",
- Result: [][]string{
- {"1", "10", "2", "3"},
- },
- Rewritten: []string{},
- RowsAffected: 1,
- Plan: "PK_IN",
- Table: "vitess_view",
- Hits: 1,
- },
- &framework.TestCase{
- Name: "update part2 table of view",
- Query: "update vitess_part2 set data2 = 2 where key3 = 1",
- Rewritten: []string{
- "begin",
- "update vitess_part2 set data2 = 2 where key3 in (1) /* _stream vitess_part2 (key3 ) (1 )",
- "commit",
- },
- RowsAffected: 1,
- Plan: "DML_PK",
- },
- &framework.TestCase{
- Name: "re-verify cache got invalidated",
- Query: "select * from vitess_view where key2 = 1",
- Result: [][]string{
- {"1", "10", "2", "2"},
- },
- Rewritten: []string{
- "select key2, key1, data1, data2 from vitess_view where key2 in (1)",
- },
- RowsAffected: 1,
- Plan: "PK_IN",
- Table: "vitess_view",
- Misses: 1,
- },
- &framework.TestCase{
- Name: "re-verify cache got reloaded",
- Query: "select * from vitess_view where key2 = 1",
- Result: [][]string{
- {"1", "10", "2", "2"},
- },
- Rewritten: []string{},
- RowsAffected: 1,
- Plan: "PK_IN",
- Table: "vitess_view",
- Hits: 1,
- },
- }
- for _, tcase := range testCases {
- if err := tcase.Test("", client); err != nil {
- t.Error(err)
- }
- }
-}
diff --git a/go/vt/tabletserver/endtoend/cache_test.go b/go/vt/tabletserver/endtoend/cache_test.go
deleted file mode 100644
index 9b701cd11ff..00000000000
--- a/go/vt/tabletserver/endtoend/cache_test.go
+++ /dev/null
@@ -1,357 +0,0 @@
-// Copyright 2015, Google Inc. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package endtoend
-
-import (
- "fmt"
- "strings"
- "testing"
-
- "github.com/youtube/vitess/go/vt/schema"
- "github.com/youtube/vitess/go/vt/tabletserver/endtoend/framework"
-)
-
-func TestUncacheableTables(t *testing.T) {
- client := framework.NewClient()
-
- nocacheTables := []struct {
- name string
- create string
- drop string
- }{{
- create: "create table vitess_nocache(eid int, primary key (eid)) comment 'vitess_nocache'",
- drop: "drop table vitess_nocache",
- }, {
- create: "create table vitess_nocache(somecol int)",
- drop: "drop table vitess_nocache",
- }, {
- create: "create table vitess_nocache(charcol varchar(10), primary key(charcol))",
- drop: "drop table vitess_nocache",
- }}
- for _, tcase := range nocacheTables {
- _, err := client.Execute(tcase.create, nil)
- if err != nil {
- t.Error(err)
- return
- }
- table, ok := framework.DebugSchema()["vitess_nocache"]
- client.Execute(tcase.drop, nil)
- if !ok {
- t.Errorf("%s: table vitess_nocache not found in schema", tcase.create)
- continue
- }
- if table.Type != schema.CacheNone {
- t.Errorf("Type: %d, want %d", table.Type, schema.CacheNone)
- }
- }
-}
-
-func TestOverrideTables(t *testing.T) {
- testCases := []struct {
- table string
- cacheType int
- }{{
- table: "vitess_cached2",
- cacheType: schema.CacheRW,
- }, {
- table: "vitess_view",
- cacheType: schema.CacheRW,
- }, {
- table: "vitess_part1",
- cacheType: schema.CacheW,
- }, {
- table: "vitess_part2",
- cacheType: schema.CacheW,
- }}
- for _, tcase := range testCases {
- table, ok := framework.DebugSchema()[tcase.table]
- if !ok {
- t.Errorf("Table %s not found in schema", tcase.table)
- return
- }
- if table.Type != tcase.cacheType {
- t.Errorf("Type: %d, want %d", table.Type, tcase.cacheType)
- }
- }
-}
-
-func TestCacheDisallows(t *testing.T) {
- client := framework.NewClient()
- testCases := []struct {
- query string
- bv map[string]interface{}
- err string
- }{{
- query: "select bid, eid from vitess_cached2 where eid = 1 and bid = 1",
- err: "error: type mismatch",
- }, {
- query: "select * from vitess_cached2 where eid = 2 and bid = 'foo' limit :a",
- bv: map[string]interface{}{"a": -1},
- err: "error: negative limit",
- }}
- for _, tcase := range testCases {
- _, err := client.Execute(tcase.query, tcase.bv)
- if err == nil || !strings.HasPrefix(err.Error(), tcase.err) {
- t.Errorf("Error: %v, want %s", err, tcase.err)
- return
- }
- }
-}
-
-func TestCacheListArgs(t *testing.T) {
- client := framework.NewClient()
- query := "select * from vitess_cached1 where eid in ::list"
- successCases := []struct {
- bv map[string]interface{}
- rowcount uint64
- }{{
- bv: map[string]interface{}{"list": []interface{}{3, 4, 32768}},
- rowcount: 2,
- }, {
- bv: map[string]interface{}{"list": []interface{}{3, 4}},
- rowcount: 2,
- }, {
- bv: map[string]interface{}{"list": []interface{}{3}},
- rowcount: 1,
- }}
- for _, success := range successCases {
- qr, err := client.Execute(query, success.bv)
- if err != nil {
- t.Error(err)
- continue
- }
- if qr.RowsAffected != success.rowcount {
- t.Errorf("RowsAffected: %d, want %d", qr.RowsAffected, success.rowcount)
- }
- }
-
- _, err := client.Execute(query, map[string]interface{}{"list": []interface{}{}})
- want := "error: empty list supplied"
- if err == nil || !strings.HasPrefix(err.Error(), want) {
- t.Errorf("Error: %v, want %s", err, want)
- return
- }
-}
-
-func verifyvitessCached2(t *testing.T, table string) error {
- client := framework.NewClient()
- query := fmt.Sprintf("select * from %s where eid = 2 and bid = 'foo'", table)
- _, err := client.Execute(query, nil)
- if err != nil {
- return err
- }
- tstart := framework.TableStats()[table]
- _, err = client.Execute(query, nil)
- if err != nil {
- return err
- }
- tend := framework.TableStats()[table]
- if tend.Hits != tstart.Hits+1 {
- return fmt.Errorf("Hits: %d, want %d", tend.Hits, tstart.Hits+1)
- }
- return nil
-}
-
-func TestUncache(t *testing.T) {
- // Verify rowcache is working vitess_cached2
- err := verifyvitessCached2(t, "vitess_cached2")
- if err != nil {
- t.Error(err)
- return
- }
-
- // Disable rowcache for vitess_cached2
- client := framework.NewClient()
- _, err = client.Execute("alter table vitess_cached2 comment 'vitess_nocache'", nil)
- if err != nil {
- t.Error(err)
- return
- }
- _, err = client.Execute("select * from vitess_cached2 where eid = 2 and bid = 'foo'", nil)
- if err != nil {
- t.Error(err)
- }
- if tstat, ok := framework.TableStats()["vitess_cached2"]; ok {
- t.Errorf("table stats was found: %v, want not found", tstat)
- }
-
- // Re-enable rowcache and verify it's working
- _, err = client.Execute("alter table vitess_cached2 comment ''", nil)
- if err != nil {
- t.Error(err)
- return
- }
- err = verifyvitessCached2(t, "vitess_cached2")
- if err != nil {
- t.Error(err)
- return
- }
-}
-
-func TestRename(t *testing.T) {
- // Verify rowcache is working vitess_cached2
- err := verifyvitessCached2(t, "vitess_cached2")
- if err != nil {
- t.Error(err)
- return
- }
-
- // Rename & test
- client := framework.NewClient()
- _, err = client.Execute("alter table vitess_cached2 rename to vitess_renamed", nil)
- if err != nil {
- t.Error(err)
- return
- }
- if tstat, ok := framework.TableStats()["vitess_cached2"]; ok {
- t.Errorf("table stats was found: %v, want not found", tstat)
- }
-
- err = verifyvitessCached2(t, "vitess_renamed")
- if err != nil {
- t.Error(err)
- return
- }
-
- // Rename back & verify
- _, err = client.Execute("rename table vitess_renamed to vitess_cached2", nil)
- if err != nil {
- t.Error(err)
- return
- }
- err = verifyvitessCached2(t, "vitess_cached2")
- if err != nil {
- t.Error(err)
- return
- }
-}
-
-func TestSpotCheck(t *testing.T) {
- vstart := framework.DebugVars()
- client := framework.NewClient()
- _, err := client.Execute("select * from vitess_cached2 where eid = 2 and bid = 'foo'", nil)
- if err != nil {
- t.Error(err)
- return
- }
- if err := compareIntDiff(framework.DebugVars(), "RowcacheSpotCheckCount", vstart, 0); err != nil {
- t.Error(err)
- }
-
- defer framework.Server.SetSpotCheckRatio(framework.Server.SpotCheckRatio())
- framework.Server.SetSpotCheckRatio(1)
- if err := verifyIntValue(framework.DebugVars(), "RowcacheSpotCheckRatio", 1); err != nil {
- t.Error(err)
- }
-
- vstart = framework.DebugVars()
- _, err = client.Execute("select * from vitess_cached2 where eid = 2 and bid = 'foo'", nil)
- if err != nil {
- t.Error(err)
- return
- }
- if err := compareIntDiff(framework.DebugVars(), "RowcacheSpotCheckCount", vstart, 1); err != nil {
- t.Error(err)
- }
-
- vstart = framework.DebugVars()
- _, err = client.Execute("select * from vitess_cached1 where eid in (9)", nil)
- if err != nil {
- t.Error(err)
- return
- }
- if err := compareIntDiff(framework.DebugVars(), "RowcacheSpotCheckCount", vstart, 0); err != nil {
- t.Error(err)
- }
- _, err = client.Execute("select * from vitess_cached1 where eid in (9)", nil)
- if err != nil {
- t.Error(err)
- return
- }
- if err := compareIntDiff(framework.DebugVars(), "RowcacheSpotCheckCount", vstart, 1); err != nil {
- t.Error(err)
- }
-}
-
-func TestCacheTypes(t *testing.T) {
- client := framework.NewClient()
- badRequests := []struct {
- query string
- bv map[string]interface{}
- out string
- }{{
- query: "select * from vitess_cached2 where eid = 'str' and bid = 'str'",
- out: "error: strconv.ParseInt",
- }, {
- query: "select * from vitess_cached2 where eid = :str and bid = :str",
- bv: map[string]interface{}{"str": "str"},
- out: "error: strconv.ParseInt",
- }, {
- query: "select * from vitess_cached2 where eid = 1 and bid = 1",
- out: "error: type mismatch",
- }, {
- query: "select * from vitess_cached2 where eid = :id and bid = :id",
- bv: map[string]interface{}{"id": 1},
- out: "error: type mismatch",
- }, {
- query: "select * from vitess_cached2 where eid = 1.2 and bid = 1.2",
- out: "error: type mismatch",
- }, {
- query: "select * from vitess_cached2 where eid = :fl and bid = :fl",
- bv: map[string]interface{}{"fl": 1.2},
- out: "error: type mismatch",
- }}
- for _, tcase := range badRequests {
- _, err := client.Execute(tcase.query, tcase.bv)
- if err == nil || !strings.HasPrefix(err.Error(), tcase.out) {
- t.Errorf("%s: %v, want %s", tcase.query, err, tcase.out)
- }
- }
-}
-
-func TestNoData(t *testing.T) {
- qr, err := framework.NewClient().Execute("select * from vitess_cached2 where eid = 6 and name = 'bar'", nil)
- if err != nil {
- t.Error(err)
- return
- }
- if qr.RowsAffected != 0 {
- t.Errorf("RowsAffected: %d, want 0", qr.RowsAffected)
- }
-}
-
-func TestCacheStats(t *testing.T) {
- client := framework.NewClient()
- query := "select * from vitess_cached2 where eid = 2 and bid = 'foo'"
- _, err := client.Execute(query, nil)
- if err != nil {
- t.Error(err)
- return
- }
- vstart := framework.DebugVars()
- _, err = client.Execute(query, nil)
- if err != nil {
- t.Error(err)
- return
- }
- if err := compareIntDiff(framework.DebugVars(), "RowcacheStats/vitess_cached2.Hits", vstart, 1); err != nil {
- t.Error(err)
- }
-
- vstart = framework.DebugVars()
- _, err = client.Execute("update vitess_part2 set data2 = 2 where key3 = 1", nil)
- if err != nil {
- t.Error(err)
- return
- }
- _, err = client.Execute("select * from vitess_view where key2 = 1", nil)
- if err != nil {
- t.Error(err)
- return
- }
- if err := compareIntDiff(framework.DebugVars(), "RowcacheStats/vitess_view.Misses", vstart, 1); err != nil {
- t.Error(err)
- }
-}
diff --git a/go/vt/tabletserver/endtoend/config_test.go b/go/vt/tabletserver/endtoend/config_test.go
index 2080eba11d5..5a0249c8e46 100644
--- a/go/vt/tabletserver/endtoend/config_test.go
+++ b/go/vt/tabletserver/endtoend/config_test.go
@@ -60,21 +60,6 @@ func TestConfigVars(t *testing.T) {
}, {
tag: "QueryTimeout",
val: int(framework.BaseConfig.QueryTimeout * 1e9),
- }, {
- tag: "RowcacheConnPoolAvailable",
- val: framework.BaseConfig.RowCache.Connections - 50,
- }, {
- tag: "RowcacheConnPoolCapacity",
- val: framework.BaseConfig.RowCache.Connections - 50,
- }, {
- tag: "RowcacheConnPoolIdleTimeout",
- val: int(framework.BaseConfig.IdleTimeout * 1e9),
- }, {
- tag: "RowcacheConnPoolMaxCap",
- val: framework.BaseConfig.RowCache.Connections - 50,
- }, {
- tag: "RowcacheSpotCheckRatio",
- val: 0,
}, {
tag: "SchemaReloadTime",
val: int(framework.BaseConfig.SchemaReloadTime * 1e9),
diff --git a/go/vt/tabletserver/endtoend/framework/server.go b/go/vt/tabletserver/endtoend/framework/server.go
index 6b7cb5d893f..64e0ec12669 100644
--- a/go/vt/tabletserver/endtoend/framework/server.go
+++ b/go/vt/tabletserver/endtoend/framework/server.go
@@ -8,8 +8,6 @@ import (
"fmt"
"net"
"net/http"
- "os"
- "path"
"time"
"github.com/youtube/vitess/go/sqldb"
@@ -18,7 +16,6 @@ import (
querypb "github.com/youtube/vitess/go/vt/proto/query"
topodatapb "github.com/youtube/vitess/go/vt/proto/topodata"
"github.com/youtube/vitess/go/vt/tabletserver"
- "github.com/youtube/vitess/go/vt/vttest"
)
var (
@@ -35,7 +32,7 @@ var (
// StartServer starts the server and initializes
// all the global variables. This function should only be called
// once at the beginning of the test.
-func StartServer(connParams sqldb.ConnParams, schemaOverrides []tabletserver.SchemaOverride) error {
+func StartServer(connParams sqldb.ConnParams) error {
dbcfgs := dbconfigs.DBConfigs{
App: dbconfigs.DBConfig{
ConnParams: connParams,
@@ -53,10 +50,6 @@ func StartServer(connParams sqldb.ConnParams, schemaOverrides []tabletserver.Sch
&dbcfgs.Repl)
BaseConfig = tabletserver.DefaultQsConfig
- BaseConfig.RowCache.Enabled = true
- BaseConfig.RowCache.Binary = vttest.MemcachedPath()
- BaseConfig.RowCache.Socket = path.Join(os.TempDir(), "memcache.sock")
- BaseConfig.RowCache.Connections = 100
BaseConfig.EnableAutoCommit = true
BaseConfig.StrictTableAcl = true
@@ -68,7 +61,7 @@ func StartServer(connParams sqldb.ConnParams, schemaOverrides []tabletserver.Sch
Server = tabletserver.NewTabletServer(BaseConfig)
Server.Register()
- err := Server.StartService(Target, dbcfgs, schemaOverrides, mysqld)
+ err := Server.StartService(Target, dbcfgs, mysqld)
if err != nil {
return fmt.Errorf("could not start service: %v\n", err)
}
diff --git a/go/vt/tabletserver/endtoend/framework/tablestats.go b/go/vt/tabletserver/endtoend/framework/tablestats.go
deleted file mode 100644
index 8867f66e6cd..00000000000
--- a/go/vt/tabletserver/endtoend/framework/tablestats.go
+++ /dev/null
@@ -1,29 +0,0 @@
-// Copyright 2015, Google Inc. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package framework
-
-import (
- "encoding/json"
- "fmt"
- "net/http"
-)
-
-// TableStat contains the stats for one table.
-type TableStat struct {
- Hits, Absent, Misses, Invalidations int
-}
-
-// TableStats parses /debug/table_stats and returns
-// a map of the table stats keyed by the table name.
-func TableStats() map[string]TableStat {
- out := make(map[string]TableStat)
- response, err := http.Get(fmt.Sprintf("%s/debug/table_stats", ServerAddress))
- if err != nil {
- return out
- }
- defer response.Body.Close()
- _ = json.NewDecoder(response.Body).Decode(&out)
- return out
-}
diff --git a/go/vt/tabletserver/endtoend/framework/testcase.go b/go/vt/tabletserver/endtoend/framework/testcase.go
index b8cac71e562..66fe10befe8 100644
--- a/go/vt/tabletserver/endtoend/framework/testcase.go
+++ b/go/vt/tabletserver/endtoend/framework/testcase.go
@@ -93,11 +93,6 @@ func (tc *TestCase) Test(name string, client *QueryClient) error {
catcher := NewQueryCatcher()
defer catcher.Close()
- var tstart TableStat
- if tc.Table != "" {
- tstart = TableStats()[tc.Table]
- }
-
qr, err := exec(client, tc.Query, tc.BindVars)
if err != nil {
return fmt.Errorf("%s: Execute failed: %v", name, err)
@@ -142,22 +137,6 @@ func (tc *TestCase) Test(name string, client *QueryClient) error {
errs = append(errs, fmt.Sprintf("Plan mismatch: %s, want %s", queryInfo.PlanType, tc.Plan))
}
}
-
- if tc.Table != "" {
- tend := TableStats()[tc.Table]
- if err = checkStat("Hits", tc.Hits, tend.Hits-tstart.Hits, queryInfo.CacheHits); err != nil {
- errs = append(errs, err.Error())
- }
- if err = checkStat("Misses", tc.Misses, tend.Misses-tstart.Misses, queryInfo.CacheMisses); err != nil {
- errs = append(errs, err.Error())
- }
- if err = checkStat("Absent", tc.Absent, tend.Absent-tstart.Absent, queryInfo.CacheAbsent); err != nil {
- errs = append(errs, err.Error())
- }
- if err = checkStat("Invalidations", tc.Invalidations, tend.Invalidations-tstart.Invalidations, queryInfo.CacheInvalidations); err != nil {
- errs = append(errs, err.Error())
- }
- }
if len(errs) != 0 {
if name == "" {
return errors.New(strings.Join(errs, "\n"))
diff --git a/go/vt/tabletserver/endtoend/main_test.go b/go/vt/tabletserver/endtoend/main_test.go
index 1a19e5b7ec5..7e4876d1d73 100644
--- a/go/vt/tabletserver/endtoend/main_test.go
+++ b/go/vt/tabletserver/endtoend/main_test.go
@@ -5,7 +5,6 @@
package endtoend
import (
- "encoding/json"
"errors"
"flag"
"fmt"
@@ -19,11 +18,6 @@ import (
"github.com/youtube/vitess/go/vt/tabletserver"
"github.com/youtube/vitess/go/vt/tabletserver/endtoend/framework"
"github.com/youtube/vitess/go/vt/vttest"
-
- // import mysql to register mysql connection function
-
- // import memcache to register memcache connection function
- _ "github.com/youtube/vitess/go/memcache"
)
var (
@@ -47,14 +41,7 @@ func TestMain(m *testing.M) {
return 1
}
- var schemaOverrides []tabletserver.SchemaOverride
- err = json.Unmarshal([]byte(schemaOverrideJSON), &schemaOverrides)
- if err != nil {
- fmt.Fprintf(os.Stderr, "%v", err)
- return 1
- }
-
- err = framework.StartServer(connParams, schemaOverrides)
+ err = framework.StartServer(connParams)
if err != nil {
fmt.Fprintf(os.Stderr, "%v", err)
return 1
@@ -92,55 +79,30 @@ func initTableACL() error {
return nil
}
-var testSchema = `create table vitess_test(intval int, floatval float, charval varchar(256), binval varbinary(256), primary key(intval)) comment 'vitess_nocache';
+var testSchema = `create table vitess_test(intval int, floatval float, charval varchar(256), binval varbinary(256), primary key(intval));
insert into vitess_test values(1, 1.12345, 0xC2A2, 0x00FF), (2, null, '', null), (3, null, null, null);
-create table vitess_a(eid bigint default 1, id int default 1, name varchar(128), foo varbinary(128), primary key(eid, id)) comment 'vitess_nocache';
-create table vitess_b(eid bigint, id int, primary key(eid, id)) comment 'vitess_nocache';
-create table vitess_c(eid bigint, name varchar(128), foo varbinary(128), primary key(eid, name)) comment 'vitess_nocache';
-create table vitess_d(eid bigint, id int) comment 'vitess_nocache';
-create table vitess_e(eid bigint auto_increment, id int default 1, name varchar(128) default 'name', foo varchar(128), primary key(eid, id, name)) comment 'vitess_nocache';
-create table vitess_f(vb varbinary(16) default 'ab', id int, primary key(vb)) comment 'vitess_nocache';
-create table upsert_test(id1 int, id2 int, primary key (id1)) comment 'vitess_nocache';
+create table vitess_a(eid bigint default 1, id int default 1, name varchar(128), foo varbinary(128), primary key(eid, id));
+create table vitess_b(eid bigint, id int, primary key(eid, id));
+create table vitess_c(eid bigint, name varchar(128), foo varbinary(128), primary key(eid, name));
+create table vitess_d(eid bigint, id int);
+create table vitess_e(eid bigint auto_increment, id int default 1, name varchar(128) default 'name', foo varchar(128), primary key(eid, id, name));
+create table vitess_f(vb varbinary(16) default 'ab', id int, primary key(vb));
+create table upsert_test(id1 int, id2 int, primary key (id1));
create unique index id2_idx on upsert_test(id2);
insert into vitess_a(eid, id, name, foo) values(1, 1, 'abcd', 'efgh'), (1, 2, 'bcde', 'fghi');
insert into vitess_b(eid, id) values(1, 1), (1, 2);
insert into vitess_c(eid, name, foo) values(10, 'abcd', '20'), (11, 'bcde', '30');
-create table vitess_mixed_case(Col1 int, COL2 int, primary key(col1)) comment 'vitess_nocache';
+create table vitess_mixed_case(Col1 int, COL2 int, primary key(col1));
-create table vitess_cached1(eid bigint, name varchar(128), foo varbinary(128), primary key(eid));
-create index aname1 on vitess_cached1(name);
-insert into vitess_cached1 values (1, 'a', 'abcd');
-insert into vitess_cached1 values (2, 'a', 'abcd');
-insert into vitess_cached1 values (3, 'c', 'abcd');
-insert into vitess_cached1 values (4, 'd', 'abcd');
-insert into vitess_cached1 values (5, 'e', 'efgh');
-insert into vitess_cached1 values (9, 'i', 'ijkl');
+create table vitess_big(id int, string1 varchar(128), string2 varchar(100), string3 char(1), string4 varchar(50), string5 varchar(50), string6 varchar(16), string7 varchar(120), bigint1 bigint(20), bigint2 bigint(20), integer1 int, tinyint1 tinyint(4), primary key(id));
-create table vitess_cached2(eid bigint, bid varbinary(16), name varchar(128), foo varbinary(128), primary key(eid, bid));
-create index aname2 on vitess_cached2(eid, name);
-insert into vitess_cached2 values (1, 'foo', 'abcd1', 'efgh');
-insert into vitess_cached2 values (1, 'bar', 'abcd1', 'efgh');
-insert into vitess_cached2 values (2, 'foo', 'abcd2', 'efgh');
-insert into vitess_cached2 values (2, 'bar', 'abcd2', 'efgh');
-
-create table vitess_big(id int, string1 varchar(128), string2 varchar(100), string3 char(1), string4 varchar(50), string5 varchar(50), string6 varchar(16), string7 varchar(120), bigint1 bigint(20), bigint2 bigint(20), integer1 int, tinyint1 tinyint(4), primary key(id)) comment 'vitess_big';
-
-create table vitess_ints(tiny tinyint, tinyu tinyint unsigned, small smallint, smallu smallint unsigned, medium mediumint, mediumu mediumint unsigned, normal int, normalu int unsigned, big bigint, bigu bigint unsigned, y year, primary key(tiny)) comment 'vitess_nocache';
-create table vitess_fracts(id int, deci decimal(5,2), num numeric(5,2), f float, d double, primary key(id)) comment 'vitess_nocache';
-create table vitess_strings(vb varbinary(16), c char(16), vc varchar(16), b binary(4), tb tinyblob, bl blob, ttx tinytext, tx text, en enum('a','b'), s set('a','b'), primary key(vb)) comment 'vitess_nocache';
-create table vitess_misc(id int, b bit(8), d date, dt datetime, t time, primary key(id)) comment 'vitess_nocache';
+create table vitess_ints(tiny tinyint, tinyu tinyint unsigned, small smallint, smallu smallint unsigned, medium mediumint, mediumu mediumint unsigned, normal int, normalu int unsigned, big bigint, bigu bigint unsigned, y year, primary key(tiny));
+create table vitess_fracts(id int, deci decimal(5,2), num numeric(5,2), f float, d double, primary key(id));
+create table vitess_strings(vb varbinary(16), c char(16), vc varchar(16), b binary(4), tb tinyblob, bl blob, ttx tinytext, tx text, en enum('a','b'), s set('a','b'), primary key(vb));
+create table vitess_misc(id int, b bit(8), d date, dt datetime, t time, primary key(id));
create table vitess_unsupported(id int, pt point, primary key(id));
-create table vitess_part1(key1 bigint, key2 bigint, data1 int, primary key(key1, key2));
-create unique index vitess_key2 on vitess_part1(key2);
-create table vitess_part2(key3 bigint, data2 int, primary key(key3));
-create view vitess_view as select key2, key1, data1, data2 from vitess_part1, vitess_part2 where key2=key3;
-insert into vitess_part1 values(10, 1, 1);
-insert into vitess_part1 values(10, 2, 2);
-insert into vitess_part2 values(1, 3);
-insert into vitess_part2 values(2, 4);
-
create table vitess_seq(id int, next_id bigint, cache bigint, increment bigint, primary key(id)) comment 'vitess_sequence';
insert into vitess_seq values(0, 1, 3, 2);
@@ -238,25 +200,3 @@ var tableACLConfig = `{
}
]
}`
-
-var schemaOverrideJSON = `[{
- "Name": "vitess_view",
- "PKColumns": ["key2"],
- "Cache": {
- "Type": "RW"
- }
-}, {
- "Name": "vitess_part1",
- "PKColumns": ["key2"],
- "Cache": {
- "Type": "W",
- "Table": "vitess_view"
- }
-}, {
- "Name": "vitess_part2",
- "PKColumns": ["key3"],
- "Cache": {
- "Type": "W",
- "Table": "vitess_view"
- }
-}]`
diff --git a/go/vt/tabletserver/endtoend/nocache_test.go b/go/vt/tabletserver/endtoend/misc_test.go
similarity index 100%
rename from go/vt/tabletserver/endtoend/nocache_test.go
rename to go/vt/tabletserver/endtoend/misc_test.go
diff --git a/go/vt/tabletserver/endtoend/nocache_case_test.go b/go/vt/tabletserver/endtoend/queries_test.go
similarity index 96%
rename from go/vt/tabletserver/endtoend/nocache_case_test.go
rename to go/vt/tabletserver/endtoend/queries_test.go
index fc14d98c8a1..b6a6079af61 100644
--- a/go/vt/tabletserver/endtoend/nocache_case_test.go
+++ b/go/vt/tabletserver/endtoend/queries_test.go
@@ -18,13 +18,7 @@ RowsAffected mismatch: 2, want 1
Rewritten mismatch:
'[select eid, id from vitess_a where 1 != 1 union select eid, id from vitess_b where 1 != 1 select /* fail */ eid, id from vitess_a union select eid, id from vitess_b]' does not match
'[select eid id from vitess_a where 1 != 1 union select eid, id from vitess_b where 1 != 1 select /* fail */ eid, id from vitess_a union select eid, id from vitess_b]'
-Plan mismatch: PASS_SELECT, want aa
-Hits mismatch on table stats: 0, want 1
-Hits mismatch on query info: 0, want 1
-Misses mismatch on table stats: 0, want 2
-Misses mismatch on query info: 0, want 2
-Absent mismatch on table stats: 0, want 3
-Absent mismatch on query info: 0, want 3`
+Plan mismatch: PASS_SELECT, want aa`
func TestTheFramework(t *testing.T) {
client := framework.NewClient()
@@ -41,11 +35,8 @@ func TestTheFramework(t *testing.T) {
"select eid id from vitess_a where 1 != 1 union select eid, id from vitess_b where 1 != 1",
"select /* fail */ eid, id from vitess_a union select eid, id from vitess_b",
},
- Plan: "aa",
- Table: "bb",
- Hits: 1,
- Misses: 2,
- Absent: 3,
+ Plan: "aa",
+ Table: "bb",
}
err := expectFail.Test("", client)
if err == nil || err.Error() != frameworkErrors {
@@ -547,29 +538,6 @@ func TestNocacheCases(t *testing.T) {
framework.TestQuery("commit"),
},
},
- &framework.MultiCase{
- Name: "insert with qualified column name",
- Cases: []framework.Testable{
- framework.TestQuery("begin"),
- &framework.TestCase{
- Query: "insert /* qualified */ into vitess_a(vitess_a.eid, id, name, foo) values (4, 1, 'aaaa', 'cccc')",
- Rewritten: []string{
- "insert /* qualified */ into vitess_a(vitess_a.eid, id, name, foo) values (4, 1, 'aaaa', 'cccc') /* _stream vitess_a (eid id ) (4 1 )",
- },
- RowsAffected: 1,
- },
- framework.TestQuery("commit"),
- &framework.TestCase{
- Query: "select * from vitess_a where eid = 4 and id = 1",
- Result: [][]string{
- {"4", "1", "aaaa", "cccc"},
- },
- },
- framework.TestQuery("begin"),
- framework.TestQuery("delete from vitess_a where eid>1"),
- framework.TestQuery("commit"),
- },
- },
&framework.MultiCase{
Name: "insert with mixed case column names",
Cases: []framework.Testable{
@@ -1051,29 +1019,6 @@ func TestNocacheCases(t *testing.T) {
framework.TestQuery("commit"),
},
},
- &framework.MultiCase{
- Name: "pk change with qualifed column name update",
- Cases: []framework.Testable{
- framework.TestQuery("begin"),
- &framework.TestCase{
- Query: "update vitess_a set vitess_a.eid = 2 where eid = 1 and id = 1",
- Rewritten: []string{
- "update vitess_a set vitess_a.eid = 2 where (eid = 1 and id = 1) /* _stream vitess_a (eid id ) (1 1 ) (2 1 )",
- },
- RowsAffected: 1,
- },
- framework.TestQuery("commit"),
- &framework.TestCase{
- Query: "select eid from vitess_a where id = 1",
- Result: [][]string{
- {"2"},
- },
- },
- framework.TestQuery("begin"),
- framework.TestQuery("update vitess_a set eid=1 where id=1"),
- framework.TestQuery("commit"),
- },
- },
&framework.MultiCase{
Name: "partial pk update",
Cases: []framework.Testable{
diff --git a/go/vt/tabletserver/fakecacheservice/fakecacheservice.go b/go/vt/tabletserver/fakecacheservice/fakecacheservice.go
deleted file mode 100644
index 843e105f5b2..00000000000
--- a/go/vt/tabletserver/fakecacheservice/fakecacheservice.go
+++ /dev/null
@@ -1,254 +0,0 @@
-// Copyright 2015, Google Inc. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-// Package fakecacheservice provides a fake implementation of cacheservice.CacheService
-package fakecacheservice
-
-import (
- "errors"
- "fmt"
- "math/rand"
- "sync"
- "time"
-
- cs "github.com/youtube/vitess/go/cacheservice"
- "github.com/youtube/vitess/go/sync2"
-)
-
-var errCacheService = errors.New("cacheservice error")
-
-// FakeCacheService is a fake implementation of CacheService
-type FakeCacheService struct {
- cache *Cache
-}
-
-// Cache is a cache like data structure.
-type Cache struct {
- mu sync.Mutex
- data map[string]*cs.Result
- enableCacheServiceError sync2.AtomicInt32
-}
-
-// Set sets a key and associated value to the cache.
-func (cache *Cache) Set(key string, val *cs.Result) {
- cache.mu.Lock()
- defer cache.mu.Unlock()
- var newVal cs.Result
- newVal = *val
- cache.data[key] = &newVal
-}
-
-// Get gets the value from cache given the key.
-func (cache *Cache) Get(key string) (*cs.Result, bool) {
- cache.mu.Lock()
- defer cache.mu.Unlock()
- val, ok := cache.data[key]
- if !ok {
- return nil, ok
- }
- var newVal cs.Result
- newVal = *val
- return &newVal, ok
-}
-
-// Delete deletes the given key from the cache.
-func (cache *Cache) Delete(key string) {
- cache.mu.Lock()
- defer cache.mu.Unlock()
- delete(cache.data, key)
-}
-
-// Clear empties the cache.
-func (cache *Cache) Clear() {
- cache.mu.Lock()
- defer cache.mu.Unlock()
- cache.data = make(map[string]*cs.Result)
-}
-
-// EnableCacheServiceError makes cache service return error.
-func (cache *Cache) EnableCacheServiceError() {
- cache.enableCacheServiceError.Set(1)
-}
-
-// DisableCacheServiceError makes cache service back to normal.
-func (cache *Cache) DisableCacheServiceError() {
- cache.enableCacheServiceError.Set(0)
-}
-
-// NewFakeCacheService creates a FakeCacheService
-func NewFakeCacheService(cache *Cache) *FakeCacheService {
- return &FakeCacheService{
- cache: cache,
- }
-}
-
-// Get returns cached data for given keys.
-func (service *FakeCacheService) Get(keys ...string) ([]cs.Result, error) {
- if service.cache.enableCacheServiceError.Get() == 1 {
- return nil, errCacheService
- }
- results := make([]cs.Result, 0, len(keys))
- for _, key := range keys {
- if val, ok := service.cache.Get(key); ok {
- results = append(results, *val)
- }
- }
- return results, nil
-}
-
-// Gets returns cached data for given keys, it is an alternative Get api
-// for using with CAS. Gets returns a CAS identifier with the item. If
-// the item's CAS value has changed since you Gets'ed it, it will not be stored.
-func (service *FakeCacheService) Gets(keys ...string) ([]cs.Result, error) {
- if service.cache.enableCacheServiceError.Get() == 1 {
- return nil, errCacheService
- }
- results := make([]cs.Result, 0, len(keys))
- for _, key := range keys {
- if val, ok := service.cache.Get(key); ok {
- val.Cas = uint64(rand.Int63())
- service.cache.Set(key, val)
- results = append(results, *val)
- }
- }
- return results, nil
-}
-
-// Set set the value with specified cache key.
-func (service *FakeCacheService) Set(key string, flags uint16, timeout uint64, value []byte) (bool, error) {
- if service.cache.enableCacheServiceError.Get() == 1 {
- return false, errCacheService
- }
- service.cache.Set(key, &cs.Result{
- Key: key,
- Value: value,
- Flags: flags,
- Cas: 0,
- })
- return true, nil
-}
-
-// Add store the value only if it does not already exist.
-func (service *FakeCacheService) Add(key string, flags uint16, timeout uint64, value []byte) (bool, error) {
- if service.cache.enableCacheServiceError.Get() == 1 {
- return false, errCacheService
- }
- if _, ok := service.cache.Get(key); ok {
- return false, nil
- }
- service.cache.Set(key, &cs.Result{
- Key: key,
- Value: value,
- Flags: flags,
- Cas: 0,
- })
- return true, nil
-}
-
-// Replace replaces the value, only if the value already exists,
-// for the specified cache key.
-func (service *FakeCacheService) Replace(key string, flags uint16, timeout uint64, value []byte) (bool, error) {
- if service.cache.enableCacheServiceError.Get() == 1 {
- return false, errCacheService
- }
- result, ok := service.cache.Get(key)
- if !ok {
- return false, nil
- }
- result.Flags = flags
- result.Value = value
- service.cache.Set(key, result)
- return true, nil
-}
-
-// Append appends the value after the last bytes in an existing item.
-func (service *FakeCacheService) Append(key string, flags uint16, timeout uint64, value []byte) (bool, error) {
- if service.cache.enableCacheServiceError.Get() == 1 {
- return false, errCacheService
- }
- result, ok := service.cache.Get(key)
- if !ok {
- return false, nil
- }
- result.Flags = flags
- result.Value = append(result.Value, value...)
- service.cache.Set(key, result)
- return true, nil
-}
-
-// Prepend prepends the value before existing value.
-func (service *FakeCacheService) Prepend(key string, flags uint16, timeout uint64, value []byte) (bool, error) {
- if service.cache.enableCacheServiceError.Get() == 1 {
- return false, errCacheService
- }
- result, ok := service.cache.Get(key)
- if !ok {
- return false, nil
- }
- result.Flags = flags
- result.Value = append(value, result.Value...)
- service.cache.Set(key, result)
- return true, nil
-}
-
-// Cas stores the value only if no one else has updated the data since you read it last.
-func (service *FakeCacheService) Cas(key string, flags uint16, timeout uint64, value []byte, cas uint64) (bool, error) {
- if service.cache.enableCacheServiceError.Get() == 1 {
- return false, errCacheService
- }
- result, ok := service.cache.Get(key)
- if !ok || result.Cas != cas {
- return false, nil
- }
- result.Flags = flags
- result.Value = value
- result.Cas = cas
- service.cache.Set(key, result)
- return true, nil
-}
-
-// Delete delete the value for the specified cache key.
-func (service *FakeCacheService) Delete(key string) (bool, error) {
- if service.cache.enableCacheServiceError.Get() == 1 {
- return false, errCacheService
- }
- service.cache.Delete(key)
- return true, nil
-}
-
-// FlushAll purges the entire cache.
-func (service *FakeCacheService) FlushAll() error {
- if service.cache.enableCacheServiceError.Get() == 1 {
- return errCacheService
- }
- service.cache.Clear()
- return nil
-}
-
-// Stats returns a list of basic stats.
-func (service *FakeCacheService) Stats(key string) ([]byte, error) {
- if service.cache.enableCacheServiceError.Get() == 1 {
- return nil, errCacheService
- }
- return []byte{}, nil
-}
-
-// Close closes the CacheService
-func (service *FakeCacheService) Close() {
-}
-
-// Register registers a fake implementation of cacheservice.CacaheService and returns its registered name
-func Register() *Cache {
- name := fmt.Sprintf("fake-%d", rand.Int63())
- cache := &Cache{data: make(map[string]*cs.Result)}
- cs.Register(name, func(cs.Config) (cs.CacheService, error) {
- return NewFakeCacheService(cache), nil
- })
- cs.DefaultCacheService = name
- return cache
-}
-
-func init() {
- rand.Seed(time.Now().UnixNano())
-}
diff --git a/go/vt/tabletserver/fakecacheservice/fakecacheservice_test.go b/go/vt/tabletserver/fakecacheservice/fakecacheservice_test.go
deleted file mode 100644
index bf09f37a4be..00000000000
--- a/go/vt/tabletserver/fakecacheservice/fakecacheservice_test.go
+++ /dev/null
@@ -1,171 +0,0 @@
-// Copyright 2015, Google Inc. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package fakecacheservice
-
-import (
- "reflect"
- "testing"
-
- cs "github.com/youtube/vitess/go/cacheservice"
-)
-
-func TestRegisterFakeCacheService(t *testing.T) {
- Register()
- service, err := cs.Connect(cs.Config{})
- if err != nil {
- t.Fatalf("got error when creating a new fake cacheservice: %v", err)
- }
- if s, ok := service.(*FakeCacheService); !ok {
- t.Fatalf("created service is not a fake cacheservice, cacheservice: %v", s)
- }
-}
-
-func TestFakeCacheService(t *testing.T) {
- service := NewFakeCacheService(&Cache{data: make(map[string]*cs.Result)})
- key1 := "key1"
- key2 := "key2"
- keys := []string{key1, key2}
- results, err := service.Get(keys...)
- if err != nil {
- t.Fatalf("get error: %v", err)
- }
- if !reflect.DeepEqual(results, []cs.Result{}) {
- t.Fatalf("get should return empty results, but get: %v", results)
- }
- // test Set then Get
- service.Set(key1, 0, 0, []byte("test"))
- results, err = service.Get(key1)
- if !reflect.DeepEqual(results[0].Value, []byte("test")) {
- t.Fatalf("expect to get value: test, but get: %s", string(results[0].Value))
- }
- // test Gets and Cas
- results, err = service.Gets(key1)
- if results[0].Cas == 0 {
- t.Fatalf("cas should be set")
- }
- stored, err := service.Cas(key1, 0, 0, []byte("test2"), 0)
- if stored {
- t.Fatalf("cas operation should fail")
- }
- stored, err = service.Cas(key1, 0, 0, []byte("test2"), results[0].Cas)
- if !stored {
- t.Fatalf("cas operation should succeed")
- }
- // test Add
- stored, err = service.Add(key1, 0, 0, []byte("test3"))
- if stored {
- t.Fatalf("key already exists, Add should fail")
- }
- stored, err = service.Add(key2, 0, 0, []byte("test3"))
- if !stored {
- t.Fatalf("key does not exist and Add should succeed")
- }
- // test Replace
- stored, err = service.Replace("unknownKey", 0, 0, []byte("test4"))
- if stored {
- t.Fatalf("key does not exist, Replace should fail")
- }
- service.Set(key2, 0, 0, []byte("test3"))
- stored, err = service.Replace(key2, 0, 0, []byte("test4"))
- results, err = service.Get(key2)
- if !stored || !reflect.DeepEqual(results[0].Value, []byte("test4")) {
- t.Fatalf("key already exists, Replace should succeed, expect to get: %s, but got: %s", "test4", string(results[0].Value))
- }
- // test Append
- stored, err = service.Append("unknownKey", 0, 0, []byte("test5"))
- if stored {
- t.Fatalf("key does not exist, Append should fail")
- }
- service.Set(key2, 0, 0, []byte("test4"))
- stored, err = service.Append(key2, 0, 0, []byte("test5"))
- results, err = service.Get(key2)
- if !stored || !reflect.DeepEqual(results[0].Value, []byte("test4test5")) {
- t.Fatalf("key already exists, Append should succeed")
- }
- // test Prepend
- stored, err = service.Prepend("unknownKey", 0, 0, []byte("test5"))
- if stored {
- t.Fatalf("key does not exist, Prepend should fail")
- }
- service.Set(key2, 0, 0, []byte("test4"))
- stored, err = service.Prepend(key2, 0, 0, []byte("test5"))
- results, err = service.Get(key2)
- if !stored || !reflect.DeepEqual(results[0].Value, []byte("test5test4")) {
- t.Fatalf("key already exists, Prepend should succeed")
- }
- // test Delete
- service.Set(key2, 0, 0, []byte("aaa"))
- results, err = service.Get(key2)
- if !reflect.DeepEqual(results[0].Value, []byte("aaa")) {
- t.Fatalf("set key does not succeed")
- }
- if ok, _ := service.Delete(key2); !ok {
- t.Fatalf("delete should succeed")
- }
- results, err = service.Get(key2)
- if !reflect.DeepEqual(results, []cs.Result{}) {
- t.Fatalf("key does not exists, should get empty result, but got: %v", results)
- }
- // test FlushAll
- service.Set(key1, 0, 0, []byte("aaa"))
- service.Set(key2, 0, 0, []byte("bbb"))
- err = service.FlushAll()
- if err != nil {
- t.Fatalf("FlushAll failed")
- }
- results, err = service.Get(key1, key2)
- if !reflect.DeepEqual(results, []cs.Result{}) {
- t.Fatalf("cache has been flushed, should only get empty results")
- }
- service.Stats("")
- service.Close()
-}
-
-func TestFakeCacheServiceError(t *testing.T) {
- service := NewFakeCacheService(&Cache{data: make(map[string]*cs.Result)})
- service.cache.EnableCacheServiceError()
- key1 := "key1"
- _, err := service.Set(key1, 0, 0, []byte("test"))
- checkCacheServiceError(t, err)
- _, err = service.Get(key1)
- checkCacheServiceError(t, err)
- _, err = service.Gets(key1)
- checkCacheServiceError(t, err)
- _, err = service.Cas(key1, 0, 0, []byte("test2"), 0)
- checkCacheServiceError(t, err)
- _, err = service.Add(key1, 0, 0, []byte("test3"))
- checkCacheServiceError(t, err)
- _, err = service.Replace("unknownKey", 0, 0, []byte("test4"))
- checkCacheServiceError(t, err)
- _, err = service.Append("unknownKey", 0, 0, []byte("test5"))
- checkCacheServiceError(t, err)
- _, err = service.Prepend("unknownKey", 0, 0, []byte("test5"))
- checkCacheServiceError(t, err)
- _, err = service.Prepend(key1, 0, 0, []byte("test5"))
- checkCacheServiceError(t, err)
- _, err = service.Delete(key1)
- checkCacheServiceError(t, err)
- err = service.FlushAll()
- checkCacheServiceError(t, err)
- _, err = service.Stats("")
- checkCacheServiceError(t, err)
-
- service.cache.DisableCacheServiceError()
- ok, err := service.Set(key1, 0, 0, []byte("test"))
- if !ok || err != nil {
- t.Fatalf("set should succeed")
- }
- results, err := service.Get(key1)
- if !reflect.DeepEqual(results[0].Value, []byte("test")) {
- t.Fatalf("expect to get value: test, but get: %s", string(results[0].Value))
- }
- service.Close()
-}
-
-func checkCacheServiceError(t *testing.T, err error) {
- if err != errCacheService {
- t.Fatalf("should get cacheservice error")
- }
-}
diff --git a/go/vt/tabletserver/logstats.go b/go/vt/tabletserver/logstats.go
index 00dab886dd4..c591d447d42 100644
--- a/go/vt/tabletserver/logstats.go
+++ b/go/vt/tabletserver/logstats.go
@@ -24,10 +24,8 @@ import (
var StatsLogger = streamlog.New("TabletServer", 50)
const (
- // QuerySourceRowcache means query result is found in rowcache.
- QuerySourceRowcache = 1 << iota
// QuerySourceConsolidator means query result is found in consolidator.
- QuerySourceConsolidator
+ QuerySourceConsolidator = 1 << iota
// QuerySourceMySQL means query result is returned from MySQL.
QuerySourceMySQL
)
@@ -45,10 +43,6 @@ type LogStats struct {
EndTime time.Time
MysqlResponseTime time.Duration
WaitingForConnection time.Duration
- CacheHits int64
- CacheAbsent int64
- CacheMisses int64
- CacheInvalidations int64
QuerySources byte
Rows [][]sqltypes.Value
TransactionID int64
@@ -157,16 +151,12 @@ func (stats *LogStats) FmtQuerySources() string {
if stats.QuerySources == 0 {
return "none"
}
- sources := make([]string, 3)
+ sources := make([]string, 2)
n := 0
if stats.QuerySources&QuerySourceMySQL != 0 {
sources[n] = "mysql"
n++
}
- if stats.QuerySources&QuerySourceRowcache != 0 {
- sources[n] = "rowcache"
- n++
- }
if stats.QuerySources&QuerySourceConsolidator != 0 {
sources[n] = "consolidator"
n++
@@ -205,7 +195,7 @@ func (stats *LogStats) Format(params url.Values) string {
// TODO: remove username here we fully enforce immediate caller id
remoteAddr, username := stats.RemoteAddrUsername()
return fmt.Sprintf(
- "%v\t%v\t%v\t'%v'\t'%v'\t%v\t%v\t%.6f\t%v\t%q\t%v\t%v\t%q\t%v\t%.6f\t%.6f\t%v\t%v\t%v\t%v\t%v\t%v\t%q\t\n",
+ "%v\t%v\t%v\t'%v'\t'%v'\t%v\t%v\t%.6f\t%v\t%q\t%v\t%v\t%q\t%v\t%.6f\t%.6f\t%v\t%v\t%q\t\n",
stats.Method,
remoteAddr,
username,
@@ -224,10 +214,6 @@ func (stats *LogStats) Format(params url.Values) string {
stats.WaitingForConnection.Seconds(),
stats.RowsAffected,
stats.SizeOfResponse(),
- stats.CacheHits,
- stats.CacheMisses,
- stats.CacheAbsent,
- stats.CacheInvalidations,
stats.ErrorStr(),
)
}
diff --git a/go/vt/tabletserver/logstats_test.go b/go/vt/tabletserver/logstats_test.go
index e542d5ae4e3..33a399ce9bf 100644
--- a/go/vt/tabletserver/logstats_test.go
+++ b/go/vt/tabletserver/logstats_test.go
@@ -81,11 +81,6 @@ func TestLogStatsFormatQuerySources(t *testing.T) {
t.Fatalf("'mysql' should be in formated query sources")
}
- logStats.QuerySources |= QuerySourceRowcache
- if !strings.Contains(logStats.FmtQuerySources(), "rowcache") {
- t.Fatalf("'rowcache' should be in formated query sources")
- }
-
logStats.QuerySources |= QuerySourceConsolidator
if !strings.Contains(logStats.FmtQuerySources(), "consolidator") {
t.Fatalf("'consolidator' should be in formated query sources")
diff --git a/go/vt/tabletserver/memcache_stats.go b/go/vt/tabletserver/memcache_stats.go
deleted file mode 100644
index 031a3276dd0..00000000000
--- a/go/vt/tabletserver/memcache_stats.go
+++ /dev/null
@@ -1,397 +0,0 @@
-// Copyright 2013, Google Inc. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package tabletserver
-
-import (
- "fmt"
- "regexp"
- "strconv"
- "strings"
- "sync"
- "time"
-
- log "github.com/golang/glog"
- "github.com/youtube/vitess/go/stats"
- "github.com/youtube/vitess/go/timer"
-)
-
-var mainStringMetrics = map[string]bool{
- "accepting_conns": false,
- "auth_cmds": false,
- "auth_errors": false,
- "bytes_read": false,
- "bytes_written": false,
- "bytes": false,
- "cas_badval": false,
- "cas_hits": false,
- "cas_misses": false,
- "cmd_flush": false,
- "cmd_get": false,
- "cmd_set": false,
- "cmd_touch": false,
- "conn_yields": false,
- "connection_structures": false,
- "curr_connections": false,
- "curr_items": false,
- "decr_hits": false,
- "decr_misses": false,
- "delete_hits": false,
- "delete_misses": false,
- "evicted_unfetched": false,
- "evictions": false,
- "expired_unfetched": false,
- "get_hits": false,
- "get_misses": false,
- "hash_bytes": false,
- "hash_is_expanding": false,
- "hash_power_level": false,
- "incr_hits": false,
- "incr_misses": false,
- "libevent": true,
- "limit_maxbytes": false,
- "listen_disabled_num": false,
- "pid": false,
- "pointer_size": false,
- "reclaimed": false,
- "reserved_fds": false,
- "rusage_system": true,
- "rusage_user": true,
- "threads": false,
- "time": false,
- "total_connections": false,
- "total_items": false,
- "touch_hits": false,
- "touch_misses": false,
- "uptime": false,
- "version": true,
-}
-
-var slabsSingleMetrics = map[string]bool{
- "active_slabs": true,
- "cas_badval": false,
- "cas_hits": false,
- "chunk_size": false,
- "chunks_per_page": false,
- "cmd_set": false,
- "decr_hits": false,
- "delete_hits": false,
- "free_chunks_end": false,
- "free_chunks": false,
- "get_hits": false,
- "incr_hits": false,
- "mem_requested": false,
- "total_chunks": false,
- "total_malloced": true,
- "total_pages": false,
- "touch_hits": false,
- "used_chunks": false,
-}
-
-var itemsMetrics = []string{
- "age",
- "evicted",
- "evicted_nonzero",
- "evicted_time",
- "evicted_unfetched",
- "expired_unfetched",
- "number",
- "outofmemory",
- "reclaimed",
- "tailrepairs",
-}
-
-// RetrieveCacheStats returns current memcache stats.
-type RetrieveCacheStats func(key string) string
-
-// MemcacheStats exports the Memcache internal stats through stats package.
-type MemcacheStats struct {
- ticks *timer.Timer
- mu sync.Mutex
- main map[string]string
- slabs map[string]map[string]int64
- items map[string]map[string]int64
- statsPrefix string
- statsFunc RetrieveCacheStats
- queryServiceStats *QueryServiceStats
- flags int64
-}
-
-const (
- enableMain = 1 << iota
- enableSlabs
- enableItems
-)
-
-// NewMemcacheStats creates a new MemcacheStats.
-// main, slabs and items specify the categories of stats that need to be exported.
-func NewMemcacheStats(
- statsPrefix string,
- refreshFreq time.Duration,
- flags int64,
- queryServiceStats *QueryServiceStats,
- statsFunc RetrieveCacheStats) *MemcacheStats {
- memstats := &MemcacheStats{
- ticks: timer.NewTimer(refreshFreq),
- statsPrefix: statsPrefix,
- statsFunc: statsFunc,
- main: make(map[string]string),
- slabs: make(map[string]map[string]int64),
- items: make(map[string]map[string]int64),
- queryServiceStats: queryServiceStats,
- flags: flags,
- }
- if flags&enableMain > 0 {
- memstats.publishMainStats()
- }
- if flags&enableSlabs > 0 {
- memstats.publishSlabsStats()
- }
- if flags*enableItems > 0 {
- memstats.publishItemsStats()
- }
- return memstats
-}
-
-// Open starts exporting the stats.
-func (memstats *MemcacheStats) Open() {
- memstats.ticks.Start(func() { memstats.update() })
-}
-
-// Close clears the variable values and stops exporting the stats.
-func (memstats *MemcacheStats) Close() {
- memstats.ticks.Stop()
-
- memstats.mu.Lock()
- defer memstats.mu.Unlock()
- for key := range memstats.main {
- if mainStringMetrics[key] {
- memstats.main[key] = ""
- } else {
- memstats.main[key] = "0"
- }
- }
- for key := range memstats.slabs {
- memstats.slabs[key] = make(map[string]int64)
- }
- for key := range memstats.items {
- memstats.items[key] = make(map[string]int64)
- }
-}
-
-func (memstats *MemcacheStats) update() {
- if memstats.flags&enableMain > 0 {
- memstats.updateMainStats()
- }
- if memstats.flags&enableSlabs > 0 {
- memstats.updateSlabsStats()
- }
- if memstats.flags&enableItems > 0 {
- memstats.updateItemsStats()
- }
-}
-
-func (memstats *MemcacheStats) publishMainStats() {
- memstats.mu.Lock()
- defer memstats.mu.Unlock()
- for k, isstr := range mainStringMetrics {
- key := k
- if isstr {
- memstats.main[key] = ""
- stats.Publish(memstats.statsPrefix+"Memcache"+formatKey(key), stats.StringFunc(func() string {
- memstats.mu.Lock()
- defer memstats.mu.Unlock()
- return memstats.main[key]
- }))
- } else {
- memstats.main[key] = "0"
- stats.Publish(memstats.statsPrefix+"Memcache"+formatKey(key), stats.IntFunc(func() int64 {
- memstats.mu.Lock()
- defer memstats.mu.Unlock()
- ival, err := strconv.ParseInt(memstats.main[key], 10, 64)
- if err != nil {
- log.Errorf("value '%v' for key %v is not an int", memstats.main[key], key)
- memstats.queryServiceStats.InternalErrors.Add("MemcacheStats", 1)
- return -1
- }
- return ival
- }))
- }
- }
-}
-
-func (memstats *MemcacheStats) updateMainStats() {
- memstats.readStats("", func(sKey, sValue string) {
- memstats.main[sKey] = sValue
- })
-}
-
-func (memstats *MemcacheStats) publishSlabsStats() {
- memstats.mu.Lock()
- defer memstats.mu.Unlock()
- for key, isSingle := range slabsSingleMetrics {
- key := key
- memstats.slabs[key] = make(map[string]int64)
- if isSingle {
- stats.Publish(memstats.statsPrefix+"MemcacheSlabs"+formatKey(key), stats.IntFunc(func() int64 {
- memstats.mu.Lock()
- defer memstats.mu.Unlock()
- return memstats.slabs[key][""]
- }))
- } else {
- stats.Publish(memstats.statsPrefix+"MemcacheSlabs"+formatKey(key), stats.CountersFunc(func() map[string]int64 {
- memstats.mu.Lock()
- defer memstats.mu.Unlock()
- return copyMap(memstats.slabs[key])
- }))
- }
- }
-}
-
-func (memstats *MemcacheStats) updateSlabsStats() {
- memstats.readStats("slabs", func(sKey, sValue string) {
- ival, err := strconv.ParseInt(sValue, 10, 64)
- if err != nil {
- log.Error(err)
- memstats.queryServiceStats.InternalErrors.Add("MemcacheStats", 1)
- return
- }
- if slabsSingleMetrics[sKey] {
- m, ok := memstats.slabs[sKey]
- if !ok {
- log.Errorf("Unknown memcache slabs stats %v: %v", sKey, ival)
- memstats.queryServiceStats.InternalErrors.Add("MemcacheStats", 1)
- return
- }
- m[""] = ival
- return
- }
- subkey, slabid, err := parseSlabKey(sKey)
- if err != nil {
- log.Error(err)
- memstats.queryServiceStats.InternalErrors.Add("MemcacheStats", 1)
- return
- }
- m, ok := memstats.slabs[subkey]
- if !ok {
- log.Errorf("Unknown memcache slabs stats %v %v: %v", subkey, slabid, ival)
- memstats.queryServiceStats.InternalErrors.Add("MemcacheStats", 1)
- return
- }
- m[slabid] = ival
- })
-}
-
-func (memstats *MemcacheStats) publishItemsStats() {
- memstats.mu.Lock()
- defer memstats.mu.Unlock()
- for _, key := range itemsMetrics {
- key := key // create local var to keep current key
- memstats.items[key] = make(map[string]int64)
- stats.Publish(memstats.statsPrefix+"MemcacheItems"+formatKey(key), stats.CountersFunc(func() map[string]int64 {
- memstats.mu.Lock()
- defer memstats.mu.Unlock()
- return copyMap(memstats.items[key])
- }))
- }
-}
-
-func (memstats *MemcacheStats) updateItemsStats() {
- memstats.readStats("items", func(sKey, sValue string) {
- ival, err := strconv.ParseInt(sValue, 10, 64)
- if err != nil {
- log.Error(err)
- memstats.queryServiceStats.InternalErrors.Add("MemcacheStats", 1)
- return
- }
- subkey, slabid, err := parseItemKey(sKey)
- if err != nil {
- log.Error(err)
- memstats.queryServiceStats.InternalErrors.Add("MemcacheStats", 1)
- return
- }
- m, ok := memstats.items[subkey]
- if !ok {
- log.Errorf("Unknown memcache items stats %v %v: %v", subkey, slabid, ival)
- memstats.queryServiceStats.InternalErrors.Add("MemcacheStats", 1)
- return
- }
- m[slabid] = ival
- })
-}
-
-func (memstats *MemcacheStats) readStats(k string, proc func(key, value string)) {
- defer func() {
- if x := recover(); x != nil {
- _, ok := x.(*TabletError)
- if !ok {
- log.Errorf("Uncaught panic when reading memcache stats: %v", x)
- } else {
- log.Errorf("Could not read memcache stats: %v", x)
- }
- memstats.queryServiceStats.InternalErrors.Add("MemcacheStats", 1)
- }
- }()
-
- stats := memstats.statsFunc(k)
- if stats == "" {
- return
- }
-
- memstats.mu.Lock()
- defer memstats.mu.Unlock()
- lines := strings.Split(stats, "\n")
- for _, line := range lines {
- if line == "" {
- continue
- }
- items := strings.Split(line, " ")
- //if using apt-get, memcached info would be:STAT version 1.4.14 (Ubuntu)
- //so less then 3 would be compatible with original memcached
- if len(items) < 3 {
- log.Errorf("Unexpected stats: %v", line)
- memstats.queryServiceStats.InternalErrors.Add("MemcacheStats", 1)
- continue
- }
- proc(items[1], items[2])
- }
-}
-
-func formatKey(key string) string {
- key = regexp.MustCompile("^[a-z]").ReplaceAllStringFunc(key, func(item string) string {
- return strings.ToUpper(item)
- })
- key = regexp.MustCompile("_[a-z]").ReplaceAllStringFunc(key, func(item string) string {
- return strings.ToUpper(item[1:])
- })
- return key
-}
-
-// parseSlabKey splits a slab key into the subkey and slab id:
-// "1:chunk_size" -> "chunk_size", 1
-func parseSlabKey(key string) (subkey string, slabid string, err error) {
- tokens := strings.Split(key, ":")
- if len(tokens) != 2 {
- return "", "", fmt.Errorf("invalid slab key: %v", key)
- }
- return tokens[1], tokens[0], nil
-}
-
-// parseItemKey splits an item key into the subkey and slab id:
-// "items:1:number" -> "number", 1
-func parseItemKey(key string) (subkey string, slabid string, err error) {
- tokens := strings.Split(key, ":")
- if len(tokens) != 3 {
- return "", "", fmt.Errorf("invalid slab key: %v", key)
- }
- return tokens[2], tokens[1], nil
-}
-
-func copyMap(src map[string]int64) map[string]int64 {
- dst := make(map[string]int64, len(src))
- for k, v := range src {
- dst[k] = v
- }
- return dst
-}
diff --git a/go/vt/tabletserver/memcache_stats_test.go b/go/vt/tabletserver/memcache_stats_test.go
deleted file mode 100644
index 67964048cdf..00000000000
--- a/go/vt/tabletserver/memcache_stats_test.go
+++ /dev/null
@@ -1,187 +0,0 @@
-// Copyright 2015, Google Inc. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package tabletserver
-
-import (
- "expvar"
- "fmt"
- "math/rand"
- "testing"
- "time"
-
- vtrpcpb "github.com/youtube/vitess/go/vt/proto/vtrpc"
-)
-
-func TestMemcacheStats(t *testing.T) {
- statsPrefix := newStatsPrefix()
- memcacheStats := NewMemcacheStats(
- statsPrefix, 1*time.Second, enableMain, NewQueryServiceStats("", false),
- func(key string) string {
- switch key {
- case "slabs":
- return ""
- case "items":
- return ""
- }
- return "STAT threads 1\n"
- },
- )
- memcacheStats.Open()
- defer memcacheStats.Close()
- memcacheStats.update()
- checkMemcacheExpvar(t, statsPrefix+"MemcacheThreads", "1")
-}
-
-func TestMemcacheStatsInvalidMainStatsValueType(t *testing.T) {
- statsPrefix := newStatsPrefix()
- memcacheStats := NewMemcacheStats(
- statsPrefix, 1*time.Second, enableMain, NewQueryServiceStats("", false),
- func(key string) string {
- switch key {
- case "slabs":
- return ""
- case "items":
- return ""
- }
- return "STAT threads invalid_val\n" +
- // incomplete stats
- "STAT threads"
- },
- )
- memcacheStats.Open()
- defer memcacheStats.Close()
- memcacheStats.update()
- checkMemcacheExpvar(t, statsPrefix+"MemcacheThreads", "-1")
-}
-
-func TestMemcacheStatsSlabsStats(t *testing.T) {
- statsPrefix := newStatsPrefix()
- memcacheStats := NewMemcacheStats(
- statsPrefix, 1*time.Second, enableSlabs, NewQueryServiceStats("", false),
- func(key string) string {
- switch key {
- case "slabs":
- return "STAT active_slabs 5\n" +
- "STAT 1:total_pages 1\n" +
- // invalid value
- "STAT 1:total_chunks invalid_val\n" +
- // invalid key format
- "STAT 1:used_chunks:invalid 10081\n" +
- // unknown slab metric
- "STAT 1:unknown_metrics 123\n" +
- "STAT 1:free_chunks 1\n" +
- "STAT 1:free_chunks_end 10079\n"
- case "items":
- return ""
- }
- return ""
- },
- )
- memcacheStats.Open()
- defer memcacheStats.Close()
- memcacheStats.update()
- checkMemcacheExpvar(t, statsPrefix+"MemcacheSlabsActiveSlabs", `5`)
- checkMemcacheExpvar(t, statsPrefix+"MemcacheSlabsTotalPages", `{"1": 1}`)
- checkMemcacheExpvar(t, statsPrefix+"MemcacheSlabsTotalChunks", `{}`)
- checkMemcacheExpvar(t, statsPrefix+"MemcacheSlabsUsedChunks", `{}`)
- checkMemcacheExpvar(t, statsPrefix+"MemcacheSlabsFreeChunks", `{"1": 1}`)
- checkMemcacheExpvar(t, statsPrefix+"MemcacheSlabsFreeChunksEnd", `{"1": 10079}`)
-
- if expvar.Get(statsPrefix+"MemcacheSlabsUnknownMetrics") != nil {
- t.Fatalf("%s should not be exported", statsPrefix+"MemcacheSlabsUnknownMetrics")
- }
-}
-
-func TestMemcacheStatsItemsStats(t *testing.T) {
- statsPrefix := newStatsPrefix()
- memcacheStats := NewMemcacheStats(
- statsPrefix, 1*time.Second, enableItems, NewQueryServiceStats("", false),
- func(key string) string {
- switch key {
- case "slabs":
- return ""
- case "items":
- return "STAT items:2:number 1\n" +
- // invalid item value
- "STAT items:2:age invalid_value\n" +
- // invalid item key format
- "STAT items:2:age:invalid 10\n" +
- // unknown item metric
- "STAT items:2:unknown_item 20\n" +
- "STAT items:2:evicted 4\n" +
- "STAT items:2:evicted_nonzero 5\n" +
- "STAT items:2:evicted_time 2\n" +
- "STAT items:2:outofmemory 7\n" +
- "STAT items:2:tailrepairs 11\n"
- }
- return ""
- },
- )
- memcacheStats.Open()
- defer memcacheStats.Close()
- memcacheStats.update()
- checkMemcacheExpvar(t, statsPrefix+"MemcacheItemsNumber", `{"2": 1}`)
- checkMemcacheExpvar(t, statsPrefix+"MemcacheItemsEvicted", `{"2": 4}`)
- checkMemcacheExpvar(t, statsPrefix+"MemcacheItemsEvictedNonzero", `{"2": 5}`)
- checkMemcacheExpvar(t, statsPrefix+"MemcacheItemsEvictedTime", `{"2": 2}`)
- checkMemcacheExpvar(t, statsPrefix+"MemcacheItemsOutofmemory", `{"2": 7}`)
- checkMemcacheExpvar(t, statsPrefix+"MemcacheItemsTailrepairs", `{"2": 11}`)
-
- if expvar.Get(statsPrefix+"MemcacheItemsUnknownItem") != nil {
- t.Fatalf("%s should not be exported", statsPrefix+"MemcacheItemsUnknownItem")
- }
-}
-
-func TestMemcacheStatsPanic(t *testing.T) {
- statsPrefix := newStatsPrefix()
- queryServiceStats := NewQueryServiceStats("", false)
- memcacheStats := NewMemcacheStats(
- statsPrefix, 100*time.Second, enableMain, queryServiceStats,
- func(key string) string {
- panic("unknown error")
- },
- )
- errCountBefore := queryServiceStats.InternalErrors.Counts()["MemcacheStats"]
- memcacheStats.Open()
- defer memcacheStats.Close()
- memcacheStats.update()
- errCountAfter := queryServiceStats.InternalErrors.Counts()["MemcacheStats"]
- if errCountAfter-errCountBefore != 1 {
- t.Fatalf("got unknown panic, MemcacheStats counter should increase by 1")
- }
-}
-
-func TestMemcacheStatsTabletError(t *testing.T) {
- statsPrefix := newStatsPrefix()
- queryServiceStats := NewQueryServiceStats("", false)
- memcacheStats := NewMemcacheStats(
- statsPrefix, 100*time.Second, enableMain, queryServiceStats,
- func(key string) string {
- panic(NewTabletError(vtrpcpb.ErrorCode_UNKNOWN_ERROR, "unknown tablet error"))
- },
- )
- errCountBefore := queryServiceStats.InternalErrors.Counts()["MemcacheStats"]
- memcacheStats.Open()
- defer memcacheStats.Close()
- memcacheStats.update()
- errCountAfter := queryServiceStats.InternalErrors.Counts()["MemcacheStats"]
- if errCountAfter-errCountBefore != 1 {
- t.Fatalf("got tablet error, MemcacheStats counter should increase by 1")
- }
-}
-
-func checkMemcacheExpvar(t *testing.T, name string, expectedVal string) {
- val := expvar.Get(name)
- if val == nil {
- t.Fatalf("cannot find exported variable: %s", name)
- }
- if val.String() != expectedVal {
- t.Fatalf("name: %s, expect to get %s, but got: %s", name, expectedVal, val.String())
- }
-}
-
-func newStatsPrefix() string {
- return fmt.Sprintf("TestMemcache-%d-", rand.Int63())
-}
diff --git a/go/vt/tabletserver/planbuilder/dml.go b/go/vt/tabletserver/planbuilder/dml.go
index 3ad870488bb..a7b96139ea2 100644
--- a/go/vt/tabletserver/planbuilder/dml.go
+++ b/go/vt/tabletserver/planbuilder/dml.go
@@ -7,7 +7,6 @@ package planbuilder
import (
"errors"
"fmt"
- "strconv"
log "github.com/golang/glog"
"github.com/youtube/vitess/go/vt/schema"
@@ -47,16 +46,10 @@ func analyzeUpdate(upd *sqlparser.Update, getTable TableGetter) (plan *ExecPlan,
plan.OuterQuery = GenerateUpdateOuterQuery(upd)
- if conditions := analyzeWhere(upd.Where); conditions != nil {
- pkValues, err := getPKValues(conditions, tableInfo.Indexes[0])
- if err != nil {
- return nil, err
- }
- if pkValues != nil {
- plan.PlanID = PlanDMLPK
- plan.PKValues = pkValues
- return plan, nil
- }
+ if pkValues := analyzeWhere(upd.Where, tableInfo.Indexes[0]); pkValues != nil {
+ plan.PlanID = PlanDMLPK
+ plan.PKValues = pkValues
+ return plan, nil
}
plan.PlanID = PlanDMLSubquery
@@ -88,16 +81,10 @@ func analyzeDelete(del *sqlparser.Delete, getTable TableGetter) (plan *ExecPlan,
plan.OuterQuery = GenerateDeleteOuterQuery(del)
- if conditions := analyzeWhere(del.Where); conditions != nil {
- pkValues, err := getPKValues(conditions, tableInfo.Indexes[0])
- if err != nil {
- return nil, err
- }
- if pkValues != nil {
- plan.PlanID = PlanDMLPK
- plan.PKValues = pkValues
- return plan, nil
- }
+ if pkValues := analyzeWhere(del.Where, tableInfo.Indexes[0]); pkValues != nil {
+ plan.PlanID = PlanDMLPK
+ plan.PKValues = pkValues
+ return plan, nil
}
plan.PlanID = PlanDMLSubquery
@@ -106,36 +93,19 @@ func analyzeDelete(del *sqlparser.Delete, getTable TableGetter) (plan *ExecPlan,
}
func analyzeSet(set *sqlparser.Set) (plan *ExecPlan) {
- plan = &ExecPlan{
+ return &ExecPlan{
PlanID: PlanSet,
FullQuery: GenerateFullQuery(set),
}
- if len(set.Exprs) > 1 { // Multiple set values
- return plan
- }
- updateExpr := set.Exprs[0]
- plan.SetKey = updateExpr.Name.Name.Original()
- numExpr, ok := updateExpr.Expr.(sqlparser.NumVal)
- if !ok {
- return plan
- }
- val := string(numExpr)
- if ival, err := strconv.ParseInt(val, 0, 64); err == nil {
- plan.SetValue = ival
- } else if fval, err := strconv.ParseFloat(val, 64); err == nil {
- plan.SetValue = fval
- }
- return plan
}
func analyzeUpdateExpressions(exprs sqlparser.UpdateExprs, pkIndex *schema.Index) (pkValues []interface{}, err error) {
for _, expr := range exprs {
- index := pkIndex.FindColumn(sqlparser.GetColName(expr.Name).Original())
+ index := pkIndex.FindColumn(expr.Name.Original())
if index == -1 {
continue
}
if !sqlparser.IsValue(expr.Expr) {
- log.Warningf("expression is too complex %v", expr)
return nil, ErrTooComplex
}
if pkValues == nil {
@@ -151,17 +121,17 @@ func analyzeUpdateExpressions(exprs sqlparser.UpdateExprs, pkIndex *schema.Index
}
func analyzeSelect(sel *sqlparser.Select, getTable TableGetter) (plan *ExecPlan, err error) {
- // Default plan
plan = &ExecPlan{
PlanID: PlanPassSelect,
FieldQuery: GenerateFieldQuery(sel),
FullQuery: GenerateSelectLimitQuery(sel),
}
+ if sel.Lock != "" {
+ plan.PlanID = PlanSelectLock
+ }
- // from
- tableName, hasHints := analyzeFrom(sel.From)
+ tableName := analyzeFrom(sel.From)
if tableName == "" {
- plan.Reason = ReasonTable
return plan, nil
}
tableInfo, err := plan.setTableInfo(tableName, getTable)
@@ -177,162 +147,33 @@ func analyzeSelect(sel *sqlparser.Select, getTable TableGetter) (plan *ExecPlan,
plan.PlanID = PlanNextval
plan.FieldQuery = nil
plan.FullQuery = nil
- return plan, nil
}
-
- // There are bind variables in the SELECT list
- if plan.FieldQuery == nil {
- plan.Reason = ReasonSelectList
- return plan, nil
- }
-
- if sel.Distinct != "" || sel.GroupBy != nil || sel.Having != nil {
- plan.Reason = ReasonSelect
- return plan, nil
- }
-
- // Don't improve the plan if the select is locking the row
- if sel.Lock != "" {
- plan.Reason = ReasonLock
- return plan, nil
- }
-
- // Further improvements possible only if table is row-cached
- if !tableInfo.IsReadCached() {
- plan.Reason = ReasonNocache
- return plan, nil
- }
-
- // Select expressions
- selects, err := analyzeSelectExprs(sel.SelectExprs, tableInfo)
- if err != nil {
- return nil, err
- }
- if selects == nil {
- plan.Reason = ReasonSelectList
- return plan, nil
- }
- plan.ColumnNumbers = selects
-
- // where
- conditions := analyzeWhere(sel.Where)
- if conditions == nil {
- plan.Reason = ReasonWhere
- return plan, nil
- }
-
- // order
- if sel.OrderBy != nil {
- plan.Reason = ReasonOrder
- return plan, nil
- }
-
- // This check should never fail because we only cache tables with primary keys.
- if len(tableInfo.Indexes) == 0 || tableInfo.Indexes[0].Name.Lowered() != "primary" {
- panic("unexpected")
- }
-
- pkValues, err := getPKValues(conditions, tableInfo.Indexes[0])
- if err != nil {
- return nil, err
- }
- if pkValues != nil {
- plan.IndexUsed = "PRIMARY"
- offset, rowcount, err := sel.Limit.Limits()
- if err != nil {
- return nil, err
- }
- if offset != nil {
- plan.Reason = ReasonLimit
- return plan, nil
- }
- plan.Limit = rowcount
- plan.PlanID = PlanPKIn
- plan.OuterQuery = GenerateSelectOuterQuery(sel, tableInfo)
- plan.PKValues = pkValues
- return plan, nil
- }
-
- // TODO: Analyze hints to improve plan.
- if hasHints {
- plan.Reason = ReasonHasHints
- return plan, nil
- }
-
- indexUsed := getIndexMatch(conditions, tableInfo.Indexes)
- if indexUsed == nil {
- plan.Reason = ReasonNoIndexMatch
- return plan, nil
- }
- plan.IndexUsed = indexUsed.Name.Original()
- if plan.IndexUsed == "PRIMARY" {
- plan.Reason = ReasonPKIndex
- return plan, nil
- }
- var missing bool
- for _, cnum := range selects {
- if indexUsed.FindDataColumn(tableInfo.Columns[cnum].Name.Original()) != -1 {
- continue
- }
- missing = true
- break
- }
- if !missing {
- plan.Reason = ReasonCovering
- return plan, nil
- }
- plan.PlanID = PlanSelectSubquery
- plan.OuterQuery = GenerateSelectOuterQuery(sel, tableInfo)
- plan.Subquery = GenerateSelectSubquery(sel, tableInfo, plan.IndexUsed)
return plan, nil
}
-func analyzeSelectExprs(exprs sqlparser.SelectExprs, table *schema.Table) (selects []int, err error) {
- selects = make([]int, 0, len(exprs))
- for _, expr := range exprs {
- switch expr := expr.(type) {
- case *sqlparser.StarExpr:
- // Append all columns.
- for colIndex := range table.Columns {
- selects = append(selects, colIndex)
- }
- case *sqlparser.NonStarExpr:
- name := sqlparser.GetColName(expr.Expr)
- if name.Original() == "" {
- // Not a simple column name.
- return nil, nil
- }
- colIndex := table.FindColumn(name.Original())
- if colIndex == -1 {
- return nil, fmt.Errorf("column %s not found in table %s", name, table.Name)
- }
- selects = append(selects, colIndex)
- default:
- return nil, fmt.Errorf("unsupported construct: %s", sqlparser.String(expr))
- }
- }
- return selects, nil
-}
-
-func analyzeFrom(tableExprs sqlparser.TableExprs) (tablename string, hasHints bool) {
+func analyzeFrom(tableExprs sqlparser.TableExprs) string {
if len(tableExprs) > 1 {
- return "", false
+ return ""
}
node, ok := tableExprs[0].(*sqlparser.AliasedTableExpr)
if !ok {
- return "", false
+ return ""
}
- return sqlparser.GetTableName(node.Expr), node.Hints != nil
+ return sqlparser.GetTableName(node.Expr)
}
-func analyzeWhere(node *sqlparser.Where) (conditions []sqlparser.BoolExpr) {
+func analyzeWhere(node *sqlparser.Where, pkIndex *schema.Index) []interface{} {
if node == nil {
return nil
}
- return analyzeBoolean(node.Expr)
+ conditions := analyzeBoolean(node.Expr)
+ if conditions == nil {
+ return nil
+ }
+ return getPKValues(conditions, pkIndex)
}
-func analyzeBoolean(node sqlparser.BoolExpr) (conditions []sqlparser.BoolExpr) {
+func analyzeBoolean(node sqlparser.BoolExpr) (conditions []*sqlparser.ComparisonExpr) {
switch node := node.(type) {
case *sqlparser.AndExpr:
left := analyzeBoolean(node.Left)
@@ -340,9 +181,6 @@ func analyzeBoolean(node sqlparser.BoolExpr) (conditions []sqlparser.BoolExpr) {
if left == nil || right == nil {
return nil
}
- if sqlparser.HasINClause(left) && sqlparser.HasINClause(right) {
- return nil
- }
return append(left, right...)
case *sqlparser.ParenBoolExpr:
return analyzeBoolean(node.Expr)
@@ -351,29 +189,48 @@ func analyzeBoolean(node sqlparser.BoolExpr) (conditions []sqlparser.BoolExpr) {
case sqlparser.StringIn(
node.Operator,
sqlparser.EqualStr,
- sqlparser.LessThanStr,
- sqlparser.GreaterThanStr,
- sqlparser.LessEqualStr,
- sqlparser.GreaterEqualStr,
- sqlparser.NullSafeEqualStr,
sqlparser.LikeStr):
if sqlparser.IsColName(node.Left) && sqlparser.IsValue(node.Right) {
- return []sqlparser.BoolExpr{node}
+ return []*sqlparser.ComparisonExpr{node}
}
case node.Operator == sqlparser.InStr:
if sqlparser.IsColName(node.Left) && sqlparser.IsSimpleTuple(node.Right) {
- return []sqlparser.BoolExpr{node}
+ return []*sqlparser.ComparisonExpr{node}
}
}
- case *sqlparser.RangeCond:
- if node.Operator != sqlparser.BetweenStr {
+ }
+ return nil
+}
+
+func getPKValues(conditions []*sqlparser.ComparisonExpr, pkIndex *schema.Index) []interface{} {
+ pkValues := make([]interface{}, len(pkIndex.Columns))
+ inClauseSeen := false
+ for _, condition := range conditions {
+ if condition.Operator == sqlparser.InStr {
+ if inClauseSeen {
+ return nil
+ }
+ inClauseSeen = true
+ }
+ index := pkIndex.FindColumn(condition.Left.(*sqlparser.ColName).Name.Original())
+ if index == -1 {
+ return nil
+ }
+ if pkValues[index] != nil {
return nil
}
- if sqlparser.IsColName(node.Left) && sqlparser.IsValue(node.From) && sqlparser.IsValue(node.To) {
- return []sqlparser.BoolExpr{node}
+ var err error
+ pkValues[index], err = sqlparser.AsInterface(condition.Right)
+ if err != nil {
+ return nil
}
}
- return nil
+ for _, v := range pkValues {
+ if v == nil {
+ return nil
+ }
+ }
+ return pkValues
}
func analyzeInsert(ins *sqlparser.Insert, getTable TableGetter) (plan *ExecPlan, err error) {
@@ -410,16 +267,17 @@ func analyzeInsert(ins *sqlparser.Insert, getTable TableGetter) (plan *ExecPlan,
plan.OuterQuery = GenerateInsertOuterQuery(ins)
plan.Subquery = GenerateSelectLimitQuery(sel)
if len(ins.Columns) != 0 {
- plan.ColumnNumbers, err = analyzeSelectExprs(sqlparser.SelectExprs(ins.Columns), tableInfo)
- if err != nil {
- return nil, err
+ for _, col := range ins.Columns {
+ colIndex := tableInfo.FindColumn(col.Original())
+ if colIndex == -1 {
+ return nil, fmt.Errorf("column %v not found in table %s", col, tableInfo.Name)
+ }
+ plan.ColumnNumbers = append(plan.ColumnNumbers, colIndex)
}
} else {
- // StarExpr node will expand into all columns
- n := sqlparser.SelectExprs{&sqlparser.StarExpr{}}
- plan.ColumnNumbers, err = analyzeSelectExprs(n, tableInfo)
- if err != nil {
- return nil, err
+ // Add all columns.
+ for colIndex := range tableInfo.Columns {
+ plan.ColumnNumbers = append(plan.ColumnNumbers, colIndex)
}
}
plan.SubqueryPKColumns = pkColumnNumbers
@@ -449,11 +307,8 @@ func analyzeInsert(ins *sqlparser.Insert, getTable TableGetter) (plan *ExecPlan,
}
plan.SecondaryPKValues, err = analyzeUpdateExpressions(sqlparser.UpdateExprs(ins.OnDup), tableInfo.Indexes[0])
if err != nil {
- if err == ErrTooComplex {
- plan.Reason = ReasonPKChange
- return plan, nil
- }
- return nil, err
+ plan.Reason = ReasonPKChange
+ return plan, nil
}
plan.PlanID = PlanUpsertPK
newins := *ins
@@ -479,7 +334,7 @@ func getInsertPKColumns(columns sqlparser.Columns, tableInfo *schema.Table) (pkC
pkColumnNumbers[i] = -1
}
for i, column := range columns {
- index := pkIndex.FindColumn(sqlparser.GetColName(column.(*sqlparser.NonStarExpr).Expr).Original())
+ index := pkIndex.FindColumn(column.Original())
if index == -1 {
continue
}
diff --git a/go/vt/tabletserver/planbuilder/index_analysis.go b/go/vt/tabletserver/planbuilder/index_analysis.go
deleted file mode 100644
index 385e43dcda6..00000000000
--- a/go/vt/tabletserver/planbuilder/index_analysis.go
+++ /dev/null
@@ -1,137 +0,0 @@
-// Copyright 2014, Google Inc. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package planbuilder
-
-import (
- "github.com/youtube/vitess/go/vt/schema"
- "github.com/youtube/vitess/go/vt/sqlparser"
-)
-
-type indexScore struct {
- Index *schema.Index
- ColumnMatch []bool
- MatchFailed bool
-}
-
-type scoreValue int64
-
-const (
- noMatch = scoreValue(-1)
- perfectScore = scoreValue(0)
-)
-
-func newIndexScore(index *schema.Index) *indexScore {
- return &indexScore{index, make([]bool, len(index.Columns)), false}
-}
-
-func (is *indexScore) FindMatch(columnName string) int {
- if is.MatchFailed {
- return -1
- }
- if index := is.Index.FindColumn(columnName); index != -1 {
- is.ColumnMatch[index] = true
- return index
- }
- // If the column is among the data columns, we can still use
- // the index without going to the main table
- if index := is.Index.FindDataColumn(columnName); index == -1 {
- is.MatchFailed = true
- }
- return -1
-}
-
-func (is *indexScore) GetScore() scoreValue {
- if is.MatchFailed {
- return noMatch
- }
- score := noMatch
- for i, indexColumn := range is.ColumnMatch {
- if indexColumn {
- score = scoreValue(is.Index.Cardinality[i])
- continue
- }
- return score
- }
- return perfectScore
-}
-
-func newIndexScoreList(indexes []*schema.Index) []*indexScore {
- scoreList := make([]*indexScore, len(indexes))
- for i, v := range indexes {
- scoreList[i] = newIndexScore(v)
- }
- return scoreList
-}
-
-func getPKValues(conditions []sqlparser.BoolExpr, pkIndex *schema.Index) (pkValues []interface{}, err error) {
- pkindexScore := newIndexScore(pkIndex)
- pkValues = make([]interface{}, len(pkindexScore.ColumnMatch))
- for _, condition := range conditions {
- condition, ok := condition.(*sqlparser.ComparisonExpr)
- if !ok {
- return nil, nil
- }
- if !sqlparser.StringIn(condition.Operator, sqlparser.EqualStr, sqlparser.InStr) {
- return nil, nil
- }
- index := pkindexScore.FindMatch(condition.Left.(*sqlparser.ColName).Name.Original())
- if index == -1 {
- return nil, nil
- }
- switch condition.Operator {
- case sqlparser.EqualStr, sqlparser.InStr:
- var err error
- pkValues[index], err = sqlparser.AsInterface(condition.Right)
- if err != nil {
- return nil, err
- }
- default:
- panic("unreachable")
- }
- }
- if pkindexScore.GetScore() == perfectScore {
- return pkValues, nil
- }
- return nil, nil
-}
-
-func getIndexMatch(conditions []sqlparser.BoolExpr, indexes []*schema.Index) *schema.Index {
- indexScores := newIndexScoreList(indexes)
- for _, condition := range conditions {
- var col string
- switch condition := condition.(type) {
- case *sqlparser.ComparisonExpr:
- col = condition.Left.(*sqlparser.ColName).Name.Original()
- case *sqlparser.RangeCond:
- col = condition.Left.(*sqlparser.ColName).Name.Original()
- default:
- panic("unreachaable")
- }
- for _, index := range indexScores {
- index.FindMatch(col)
- }
- }
- highScore := noMatch
- highScorer := -1
- for i, index := range indexScores {
- curScore := index.GetScore()
- if curScore == noMatch {
- continue
- }
- if curScore == perfectScore {
- highScorer = i
- break
- }
- // Prefer secondary index over primary key
- if curScore >= highScore {
- highScore = curScore
- highScorer = i
- }
- }
- if highScorer == -1 {
- return nil
- }
- return indexes[highScorer]
-}
diff --git a/go/vt/tabletserver/planbuilder/plan.go b/go/vt/tabletserver/planbuilder/plan.go
index 77eac4f23ca..de1879c0eff 100644
--- a/go/vt/tabletserver/planbuilder/plan.go
+++ b/go/vt/tabletserver/planbuilder/plan.go
@@ -9,7 +9,6 @@ import (
"errors"
"fmt"
- log "github.com/golang/glog"
"github.com/youtube/vitess/go/vt/schema"
"github.com/youtube/vitess/go/vt/sqlparser"
"github.com/youtube/vitess/go/vt/tableacl"
@@ -28,15 +27,13 @@ const (
// PlanPassSelect is pass through select statements. This is the
// default plan for select statements.
PlanPassSelect PlanType = iota
+ // PlanSelectLock is for a select that locks.
+ PlanSelectLock
+ // PlanNextval is for NEXTVAL
+ PlanNextval
// PlanPassDML is pass through update & delete statements. This is
// the default plan for update and delete statements.
PlanPassDML
- // PlanPKEqual is deprecated. Use PlanPKIn instead.
- PlanPKEqual
- // PlanPKIn is select statement with a single IN clause on primary key
- PlanPKIn
- // PlanSelectSubquery is select statement with a subselect statement
- PlanSelectSubquery
// PlanDMLPK is an update or delete with an equality where clause(s)
// on primary key(s)
PlanDMLPK
@@ -47,6 +44,8 @@ const (
PlanInsertPK
// PlanInsertSubquery is same as PlanDMLSubquery but for inserts
PlanInsertSubquery
+ // PlanUpsertPK is for insert ... on duplicate key constructs
+ PlanUpsertPK
// PlanSet is for SET statements
PlanSet
// PlanDDL is for DDL statements
@@ -55,10 +54,6 @@ const (
PlanSelectStream
// PlanOther is for SHOW, DESCRIBE & EXPLAIN statements
PlanOther
- // PlanUpsertPK is for insert ... on duplicate key constructs
- PlanUpsertPK
- // PlanNextval is for NEXTVAL
- PlanNextval
// NumPlans stores the total number of plans
NumPlans
)
@@ -66,20 +61,18 @@ const (
// Must exactly match order of plan constants.
var planName = []string{
"PASS_SELECT",
+ "SELECT_LOCK",
+ "NEXTVAL",
"PASS_DML",
- "PK_EQUAL",
- "PK_IN",
- "SELECT_SUBQUERY",
"DML_PK",
"DML_SUBQUERY",
"INSERT_PK",
"INSERT_SUBQUERY",
+ "UPSERT_PK",
"SET",
"DDL",
"SELECT_STREAM",
"OTHER",
- "UPSERT_PK",
- "NEXTVAL",
}
func (pt PlanType) String() string {
@@ -101,7 +94,7 @@ func PlanByName(s string) (pt PlanType, ok bool) {
// IsSelect returns true if PlanType is about a select query.
func (pt PlanType) IsSelect() bool {
- return pt == PlanPassSelect || pt == PlanPKIn || pt == PlanSelectSubquery || pt == PlanSelectStream
+ return pt == PlanPassSelect || pt == PlanSelectLock
}
// MarshalJSON returns a json string for PlanType.
@@ -116,8 +109,7 @@ func (pt PlanType) MinRole() tableacl.Role {
var tableAclRoles = map[PlanType]tableacl.Role{
PlanPassSelect: tableacl.READER,
- PlanPKIn: tableacl.READER,
- PlanSelectSubquery: tableacl.READER,
+ PlanSelectLock: tableacl.READER,
PlanSet: tableacl.READER,
PlanPassDML: tableacl.WRITER,
PlanDMLPK: tableacl.WRITER,
@@ -137,20 +129,9 @@ type ReasonType int
// Reason codes give a hint about why a certain plan was chosen.
const (
ReasonDefault ReasonType = iota
- ReasonSelect
ReasonTable
- ReasonNocache
- ReasonSelectList
- ReasonLock
- ReasonWhere
- ReasonOrder
- ReasonLimit
- ReasonPKIndex
- ReasonCovering
- ReasonNoIndexMatch
ReasonTableNoIndex
ReasonPKChange
- ReasonHasHints
ReasonComplexExpr
ReasonUpsert
)
@@ -158,20 +139,9 @@ const (
// Must exactly match order of reason constants.
var reasonName = []string{
"DEFAULT",
- "SELECT",
"TABLE",
- "NOCACHE",
- "SELECT_LIST",
- "LOCK",
- "WHERE",
- "ORDER",
- "LIMIT",
- "PKINDEX",
- "COVERING",
- "NOINDEX_MATCH",
"TABLE_NOINDEX",
"PK_CHANGE",
- "HAS_HINTS",
"COMPLEX_EXPR",
"UPSERT",
}
@@ -204,32 +174,22 @@ type ExecPlan struct {
// For PK plans, only OuterQuery is set.
// For SUBQUERY plans, Subquery is also set.
- // IndexUsed is set only for PlanSelectSubquery
OuterQuery *sqlparser.ParsedQuery `json:",omitempty"`
Subquery *sqlparser.ParsedQuery `json:",omitempty"`
UpsertQuery *sqlparser.ParsedQuery `json:",omitempty"`
- IndexUsed string `json:",omitempty"`
- // For selects, columns to be returned
- // For PlanInsertSubquery, columns to be inserted
+ // PlanInsertSubquery: columns to be inserted.
ColumnNumbers []int `json:",omitempty"`
- // PlanPKIn, PlanDMLPK: where clause values
- // PlanInsertPK: values clause
+ // PlanDMLPK: where clause values.
+ // PlanInsertPK: values clause.
PKValues []interface{} `json:",omitempty"`
- // PK_IN. Limit clause value.
- Limit interface{} `json:",omitempty"`
-
- // For update: set clause if pk is changing
+ // For update: set clause if pk is changing.
SecondaryPKValues []interface{} `json:",omitempty"`
- // For PlanInsertSubquery: pk columns in the subquery result
+ // For PlanInsertSubquery: pk columns in the subquery result.
SubqueryPKColumns []int `json:",omitempty"`
-
- // PlanSet
- SetKey string `json:",omitempty"`
- SetValue interface{} `json:",omitempty"`
}
func (plan *ExecPlan) setTableInfo(tableName string, getTable TableGetter) (*schema.Table, error) {
@@ -250,14 +210,29 @@ func GetExecPlan(sql string, getTable TableGetter) (plan *ExecPlan, err error) {
if err != nil {
return nil, err
}
- plan, err = analyzeSQL(statement, getTable)
- if err != nil {
- return nil, err
- }
- if plan.PlanID == PlanPassDML {
- log.Warningf("PASS_DML: %s", sql)
+ switch stmt := statement.(type) {
+ case *sqlparser.Union:
+ return &ExecPlan{
+ PlanID: PlanPassSelect,
+ FieldQuery: GenerateFieldQuery(stmt),
+ FullQuery: GenerateFullQuery(stmt),
+ }, nil
+ case *sqlparser.Select:
+ return analyzeSelect(stmt, getTable)
+ case *sqlparser.Insert:
+ return analyzeInsert(stmt, getTable)
+ case *sqlparser.Update:
+ return analyzeUpdate(stmt, getTable)
+ case *sqlparser.Delete:
+ return analyzeDelete(stmt, getTable)
+ case *sqlparser.Set:
+ return analyzeSet(stmt), nil
+ case *sqlparser.DDL:
+ return analyzeDDL(stmt, getTable), nil
+ case *sqlparser.Other:
+ return &ExecPlan{PlanID: PlanOther}, nil
}
- return plan, nil
+ return nil, errors.New("invalid SQL")
}
// GetStreamExecPlan generates a ExecPlan given a sql query and a TableGetter.
@@ -277,15 +252,9 @@ func GetStreamExecPlan(sql string, getTable TableGetter) (plan *ExecPlan, err er
if stmt.Lock != "" {
return nil, errors.New("select with lock not allowed for streaming")
}
- tableName, _ := analyzeFrom(stmt.From)
- // This will block usage of NEXTVAL.
- if tableName == "dual" {
- return nil, errors.New("select from dual not allowed for streaming")
- }
- if tableName != "" {
+ if tableName := analyzeFrom(stmt.From); tableName != "" {
plan.setTableInfo(tableName, getTable)
}
-
case *sqlparser.Union:
// pass
default:
@@ -294,30 +263,3 @@ func GetStreamExecPlan(sql string, getTable TableGetter) (plan *ExecPlan, err er
return plan, nil
}
-
-func analyzeSQL(statement sqlparser.Statement, getTable TableGetter) (plan *ExecPlan, err error) {
- switch stmt := statement.(type) {
- case *sqlparser.Union:
- return &ExecPlan{
- PlanID: PlanPassSelect,
- FieldQuery: GenerateFieldQuery(stmt),
- FullQuery: GenerateFullQuery(stmt),
- Reason: ReasonSelect,
- }, nil
- case *sqlparser.Select:
- return analyzeSelect(stmt, getTable)
- case *sqlparser.Insert:
- return analyzeInsert(stmt, getTable)
- case *sqlparser.Update:
- return analyzeUpdate(stmt, getTable)
- case *sqlparser.Delete:
- return analyzeDelete(stmt, getTable)
- case *sqlparser.Set:
- return analyzeSet(stmt), nil
- case *sqlparser.DDL:
- return analyzeDDL(stmt, getTable), nil
- case *sqlparser.Other:
- return &ExecPlan{PlanID: PlanOther}, nil
- }
- return nil, errors.New("invalid SQL")
-}
diff --git a/go/vt/tabletserver/planbuilder/query_gen.go b/go/vt/tabletserver/planbuilder/query_gen.go
index b38ae38a317..0a94a0b1f90 100644
--- a/go/vt/tabletserver/planbuilder/query_gen.go
+++ b/go/vt/tabletserver/planbuilder/query_gen.go
@@ -67,15 +67,6 @@ func GenerateSelectLimitQuery(selStmt sqlparser.SelectStatement) *sqlparser.Pars
return buf.ParsedQuery()
}
-// GenerateSelectOuterQuery generates the outer query for dmls.
-func GenerateSelectOuterQuery(sel *sqlparser.Select, tableInfo *schema.Table) *sqlparser.ParsedQuery {
- buf := sqlparser.NewTrackedBuffer(nil)
- fmt.Fprintf(buf, "select ")
- writeColumnList(buf, tableInfo.Columns)
- buf.Myprintf(" from %v where %a", sel.From, ":#pk")
- return buf.ParsedQuery()
-}
-
// GenerateInsertOuterQuery generates the outer query for inserts.
func GenerateInsertOuterQuery(ins *sqlparser.Insert) *sqlparser.ParsedQuery {
buf := sqlparser.NewTrackedBuffer(nil)
@@ -103,25 +94,6 @@ func GenerateDeleteOuterQuery(del *sqlparser.Delete) *sqlparser.ParsedQuery {
return buf.ParsedQuery()
}
-// GenerateSelectSubquery generates the subquery for selects.
-func GenerateSelectSubquery(sel *sqlparser.Select, tableInfo *schema.Table, index string) *sqlparser.ParsedQuery {
- hint := &sqlparser.IndexHints{Type: sqlparser.UseStr, Indexes: []sqlparser.ColIdent{sqlparser.NewColIdent(index)}}
- tableExpr := sel.From[0].(*sqlparser.AliasedTableExpr)
- savedHint := tableExpr.Hints
- tableExpr.Hints = hint
- defer func() {
- tableExpr.Hints = savedHint
- }()
- return GenerateSubquery(
- tableInfo.Indexes[0].Columns,
- tableExpr,
- sel.Where,
- sel.OrderBy,
- sel.Limit,
- false,
- )
-}
-
// GenerateUpdateSubquery generates the subquery for updats.
func GenerateUpdateSubquery(upd *sqlparser.Update, tableInfo *schema.Table) *sqlparser.ParsedQuery {
return GenerateSubquery(
@@ -164,11 +136,3 @@ func GenerateSubquery(columns []cistring.CIString, table *sqlparser.AliasedTable
}
return buf.ParsedQuery()
}
-
-func writeColumnList(buf *sqlparser.TrackedBuffer, columns []schema.TableColumn) {
- i := 0
- for i = 0; i < len(columns)-1; i++ {
- fmt.Fprintf(buf, "%v, ", columns[i].Name)
- }
- fmt.Fprintf(buf, "%v", columns[i].Name)
-}
diff --git a/go/vt/tabletserver/query_engine.go b/go/vt/tabletserver/query_engine.go
index ae1d75d89e5..cc30cc1c11e 100644
--- a/go/vt/tabletserver/query_engine.go
+++ b/go/vt/tabletserver/query_engine.go
@@ -6,7 +6,6 @@ package tabletserver
import (
"net/http"
- "sync"
"time"
log "github.com/golang/glog"
@@ -22,10 +21,6 @@ import (
"github.com/youtube/vitess/go/vt/tableacl/acl"
)
-// spotCheckMultiplier determines the precision of the
-// spot check ratio: 1e6 == 6 digits
-const spotCheckMultiplier = 1e6
-
// QueryEngine implements the core functionality of tabletserver.
// It assumes that no requests will be sent to it before Open is
// called and succeeds.
@@ -47,7 +42,6 @@ type QueryEngine struct {
dbconfigs dbconfigs.DBConfigs
// Pools
- cachePool *CachePool
connPool *ConnPool
streamConnPool *ConnPool
@@ -55,10 +49,8 @@ type QueryEngine struct {
txPool *TxPool
consolidator *sync2.Consolidator
streamQList *QueryList
- tasks sync.WaitGroup
// Vars
- spotCheckFreq sync2.AtomicInt64
strictMode sync2.AtomicInt64
autoCommit sync2.AtomicInt64
maxResultSize sync2.AtomicInt64
@@ -88,12 +80,6 @@ type compiledPlan struct {
TransactionID int64
}
-// CacheInvalidator provides the abstraction needed for an instant invalidation
-// vs. delayed invalidation in the case of in-transaction dmls
-type CacheInvalidator interface {
- Delete(key string)
-}
-
// Helper method for conn pools to convert errors
func getOrPanic(ctx context.Context, pool *ConnPool) *DBConn {
conn, err := pool.Get(ctx)
@@ -115,26 +101,15 @@ func getOrPanic(ctx context.Context, pool *ConnPool) *DBConn {
func NewQueryEngine(checker MySQLChecker, config Config) *QueryEngine {
qe := &QueryEngine{config: config}
qe.queryServiceStats = NewQueryServiceStats(config.StatsPrefix, config.EnablePublishStats)
-
- qe.cachePool = NewCachePool(
- config.PoolNamePrefix+"Rowcache",
- config.RowCache,
- time.Duration(config.IdleTimeout*1e9),
- config.DebugURLPrefix+"/memcache/",
- config.EnablePublishStats,
- qe.queryServiceStats,
- )
qe.schemaInfo = NewSchemaInfo(
config.StatsPrefix,
checker,
config.QueryCacheSize,
time.Duration(config.SchemaReloadTime*1e9),
time.Duration(config.IdleTimeout*1e9),
- qe.cachePool,
map[string]string{
debugQueryPlansKey: config.DebugURLPrefix + "/query_plans",
debugQueryStatsKey: config.DebugURLPrefix + "/query_stats",
- debugTableStatsKey: config.DebugURLPrefix + "/table_stats",
debugSchemaKey: config.DebugURLPrefix + "/schema",
debugQueryRulesKey: config.DebugURLPrefix + "/query_rules",
},
@@ -173,7 +148,6 @@ func NewQueryEngine(checker MySQLChecker, config Config) *QueryEngine {
http.Handle(config.DebugURLPrefix+"/consolidations", qe.consolidator)
qe.streamQList = NewQueryList()
- qe.spotCheckFreq = sync2.NewAtomicInt64(int64(config.SpotCheckRatio * spotCheckMultiplier))
if config.StrictMode {
qe.strictMode.Set(1)
}
@@ -209,9 +183,6 @@ func NewQueryEngine(checker MySQLChecker, config Config) *QueryEngine {
stats.Publish(config.StatsPrefix+"MaxResultSize", stats.IntFunc(qe.maxResultSize.Get))
stats.Publish(config.StatsPrefix+"MaxDMLRows", stats.IntFunc(qe.maxDMLRows.Get))
stats.Publish(config.StatsPrefix+"StreamBufferSize", stats.IntFunc(qe.streamBufferSize.Get))
- stats.Publish(config.StatsPrefix+"RowcacheSpotCheckRatio", stats.FloatFunc(func() float64 {
- return float64(qe.spotCheckFreq.Get()) / spotCheckMultiplier
- }))
stats.Publish(config.StatsPrefix+"TableACLExemptCount", stats.IntFunc(qe.tableaclExemptCount.Get))
tableACLAllowedName = "TableACLAllowed"
tableACLDeniedName = "TableACLDenied"
@@ -226,7 +197,7 @@ func NewQueryEngine(checker MySQLChecker, config Config) *QueryEngine {
}
// Open must be called before sending requests to QueryEngine.
-func (qe *QueryEngine) Open(dbconfigs dbconfigs.DBConfigs, schemaOverrides []SchemaOverride) {
+func (qe *QueryEngine) Open(dbconfigs dbconfigs.DBConfigs) {
qe.dbconfigs = dbconfigs
appParams := dbconfigs.App.ConnParams
// Create dba params based on App connection params
@@ -241,20 +212,9 @@ func (qe *QueryEngine) Open(dbconfigs dbconfigs.DBConfigs, schemaOverrides []Sch
if qe.strictMode.Get() != 0 {
strictMode = true
}
- if !strictMode && qe.config.RowCache.Enabled {
- panic(NewTabletError(vtrpcpb.ErrorCode_INTERNAL_ERROR, "Rowcache cannot be enabled when queryserver-config-strict-mode is false"))
- }
- if qe.config.RowCache.Enabled {
- qe.cachePool.Open()
- log.Infof("rowcache is enabled")
- } else {
- log.Infof("rowcache is not enabled")
- }
start := time.Now()
- // schemaInfo depends on cachePool. Every table that has a rowcache
- // points to the cachePool.
- qe.schemaInfo.Open(&appParams, &dbaParams, schemaOverrides, strictMode)
+ qe.schemaInfo.Open(&appParams, &dbaParams, strictMode)
log.Infof("Time taken to load the schema: %v", time.Now().Sub(start))
qe.connPool.Open(&appParams, &dbaParams)
@@ -262,25 +222,6 @@ func (qe *QueryEngine) Open(dbconfigs dbconfigs.DBConfigs, schemaOverrides []Sch
qe.txPool.Open(&appParams, &dbaParams)
}
-// Launch launches the specified function inside a goroutine.
-// If Close or WaitForTxEmpty is called while a goroutine is running,
-// QueryEngine will not return until the existing functions have completed.
-// This functionality allows us to launch tasks with the assurance that
-// the QueryEngine will not be closed underneath us.
-func (qe *QueryEngine) Launch(f func()) {
- qe.tasks.Add(1)
- go func() {
- defer func() {
- qe.tasks.Done()
- if x := recover(); x != nil {
- qe.queryServiceStats.InternalErrors.Add("Task", 1)
- log.Errorf("task error: %v", x)
- }
- }()
- f()
- }()
-}
-
// IsMySQLReachable returns true if we can connect to MySQL.
func (qe *QueryEngine) IsMySQLReachable() bool {
conn, err := dbconnpool.NewDBConnection(&qe.dbconfigs.App.ConnParams, qe.queryServiceStats.MySQLStats)
@@ -306,50 +247,9 @@ func (qe *QueryEngine) WaitForTxEmpty() {
// You must ensure that no more queries will be sent
// before calling Close.
func (qe *QueryEngine) Close() {
- qe.tasks.Wait()
// Close in reverse order of Open.
qe.txPool.Close()
qe.streamConnPool.Close()
qe.connPool.Close()
qe.schemaInfo.Close()
- qe.cachePool.Close()
-}
-
-// Commit commits the specified transaction.
-func (qe *QueryEngine) Commit(ctx context.Context, logStats *LogStats, transactionID int64) {
- dirtyTables, err := qe.txPool.SafeCommit(ctx, transactionID)
- for tableName, invalidList := range dirtyTables {
- tableInfo := qe.schemaInfo.GetTable(tableName)
- if tableInfo == nil {
- continue
- }
- invalidations := int64(0)
- for key := range invalidList {
- // Use context.Background, becaause we don't want to fail
- // these deletes.
- tableInfo.Cache.Delete(context.Background(), key)
- invalidations++
- }
- logStats.CacheInvalidations += invalidations
- tableInfo.invalidations.Add(invalidations)
- }
- if err != nil {
- panic(err)
- }
-}
-
-// ClearRowcache invalidates all items in the rowcache.
-func (qe *QueryEngine) ClearRowcache(ctx context.Context) error {
- if qe.cachePool.IsClosed() {
- return NewTabletError(vtrpcpb.ErrorCode_INTERNAL_ERROR, "rowcache is not up")
- }
- conn := qe.cachePool.Get(ctx)
- defer func() { qe.cachePool.Put(conn) }()
-
- if err := conn.FlushAll(); err != nil {
- conn.Close()
- conn = nil
- return NewTabletError(vtrpcpb.ErrorCode_INTERNAL_ERROR, "%s", err)
- }
- return nil
}
diff --git a/go/vt/tabletserver/query_executor.go b/go/vt/tabletserver/query_executor.go
index d6ce89b8da3..3f224cf995a 100644
--- a/go/vt/tabletserver/query_executor.go
+++ b/go/vt/tabletserver/query_executor.go
@@ -10,7 +10,6 @@ import (
"strings"
"time"
- log "github.com/golang/glog"
"github.com/youtube/vitess/go/cistring"
"github.com/youtube/vitess/go/hack"
"github.com/youtube/vitess/go/mysql"
@@ -88,10 +87,6 @@ func (qre *QueryExecutor) Execute() (reply *sqltypes.Result, err error) {
conn := qre.qe.txPool.Get(qre.transactionID)
defer conn.Recycle()
conn.RecordQuery(qre.query)
- var invalidator CacheInvalidator
- if qre.plan.TableInfo != nil && qre.plan.TableInfo.IsCached() {
- invalidator = conn.DirtyKeys(qre.plan.TableName)
- }
switch qre.plan.PlanID {
case planbuilder.PlanPassDML:
if qre.qe.strictMode.Get() != 0 {
@@ -103,27 +98,22 @@ func (qre *QueryExecutor) Execute() (reply *sqltypes.Result, err error) {
case planbuilder.PlanInsertSubquery:
reply, err = qre.execInsertSubquery(conn)
case planbuilder.PlanDMLPK:
- reply, err = qre.execDMLPK(conn, invalidator)
+ reply, err = qre.execDMLPK(conn)
case planbuilder.PlanDMLSubquery:
- reply, err = qre.execDMLSubquery(conn, invalidator)
+ reply, err = qre.execDMLSubquery(conn)
case planbuilder.PlanOther:
reply, err = qre.execSQL(conn, qre.query, true)
case planbuilder.PlanUpsertPK:
- reply, err = qre.execUpsertPK(conn, invalidator)
+ reply, err = qre.execUpsertPK(conn)
default: // select or set in a transaction, just count as select
reply, err = qre.execDirect(conn)
}
} else {
switch qre.plan.PlanID {
case planbuilder.PlanPassSelect:
- if qre.plan.Reason == planbuilder.ReasonLock {
- return nil, NewTabletError(vtrpcpb.ErrorCode_BAD_INPUT, "Disallowed outside transaction")
- }
reply, err = qre.execSelect()
- case planbuilder.PlanPKIn:
- reply, err = qre.execPKIN()
- case planbuilder.PlanSelectSubquery:
- reply, err = qre.execSubquery()
+ case planbuilder.PlanSelectLock:
+ return nil, NewTabletError(vtrpcpb.ErrorCode_BAD_INPUT, "Disallowed outside transaction")
case planbuilder.PlanSet:
reply, err = qre.execSet()
case planbuilder.PlanOther:
@@ -174,10 +164,6 @@ func (qre *QueryExecutor) Stream(sendReply func(*sqltypes.Result) error) error {
func (qre *QueryExecutor) execDmlAutoCommit() (reply *sqltypes.Result, err error) {
return qre.execAsTransaction(func(conn *TxConnection) (reply *sqltypes.Result, err error) {
conn.RecordQuery(qre.query)
- var invalidator CacheInvalidator
- if qre.plan.TableInfo != nil && qre.plan.TableInfo.IsCached() {
- invalidator = conn.DirtyKeys(qre.plan.TableName)
- }
switch qre.plan.PlanID {
case planbuilder.PlanPassDML:
if qre.qe.strictMode.Get() != 0 {
@@ -189,11 +175,11 @@ func (qre *QueryExecutor) execDmlAutoCommit() (reply *sqltypes.Result, err error
case planbuilder.PlanInsertSubquery:
reply, err = qre.execInsertSubquery(conn)
case planbuilder.PlanDMLPK:
- reply, err = qre.execDMLPK(conn, invalidator)
+ reply, err = qre.execDMLPK(conn)
case planbuilder.PlanDMLSubquery:
- reply, err = qre.execDMLSubquery(conn, invalidator)
+ reply, err = qre.execDMLSubquery(conn)
case planbuilder.PlanUpsertPK:
- reply, err = qre.execUpsertPK(conn, invalidator)
+ reply, err = qre.execUpsertPK(conn)
default:
return nil, NewTabletError(vtrpcpb.ErrorCode_BAD_INPUT, "unsupported query: %s", qre.query)
}
@@ -213,7 +199,7 @@ func (qre *QueryExecutor) execAsTransaction(f func(conn *TxConnection) (*sqltype
qre.qe.txPool.Rollback(qre.ctx, transactionID)
qre.logStats.AddRewrittenSQL("rollback", time.Now())
} else {
- qre.qe.Commit(qre.ctx, qre.logStats, transactionID)
+ qre.qe.txPool.Commit(qre.ctx, transactionID)
qre.logStats.AddRewrittenSQL("commit", time.Now())
}
}()
@@ -305,7 +291,7 @@ func (qre *QueryExecutor) execDDL() (*sqltypes.Result, error) {
}
txid := qre.qe.txPool.Begin(qre.ctx)
- defer qre.qe.txPool.SafeCommit(qre.ctx, txid)
+ defer qre.qe.txPool.Commit(qre.ctx, txid)
// Stolen from Execute
conn := qre.qe.txPool.Get(txid)
@@ -324,18 +310,6 @@ func (qre *QueryExecutor) execDDL() (*sqltypes.Result, error) {
return result, nil
}
-func (qre *QueryExecutor) execPKIN() (*sqltypes.Result, error) {
- pkRows, err := buildValueList(qre.plan.TableInfo, qre.plan.PKValues, qre.bindVars)
- if err != nil {
- return nil, err
- }
- limit, err := getLimit(qre.plan.Limit, qre.bindVars)
- if err != nil {
- return nil, err
- }
- return qre.fetchMulti(pkRows, limit)
-}
-
func (qre *QueryExecutor) execNextval() (*sqltypes.Result, error) {
t := qre.plan.TableInfo
t.Seq.Lock()
@@ -399,127 +373,6 @@ func (qre *QueryExecutor) execNextval() (*sqltypes.Result, error) {
}, nil
}
-func (qre *QueryExecutor) execSubquery() (*sqltypes.Result, error) {
- innerResult, err := qre.qFetch(qre.logStats, qre.plan.Subquery, qre.bindVars)
- if err != nil {
- return nil, err
- }
- return qre.fetchMulti(innerResult.Rows, -1)
-}
-
-func (qre *QueryExecutor) fetchMulti(pkRows [][]sqltypes.Value, limit int64) (*sqltypes.Result, error) {
- if qre.plan.Fields == nil {
- // TODO(aaijazi): Is this due to a bad query, or an internal error? We might want to change
- // this to ErrorCode_BAD_INPUT instead.
- return nil, NewTabletError(vtrpcpb.ErrorCode_INTERNAL_ERROR, "query plan.Fields is empty")
- }
- result := &sqltypes.Result{Fields: qre.plan.Fields}
- if len(pkRows) == 0 || limit == 0 {
- return result, nil
- }
- tableInfo := qre.plan.TableInfo
- keys := make([]string, len(pkRows))
- for i, pk := range pkRows {
- keys[i] = buildKey(pk)
- }
- rcresults := tableInfo.Cache.Get(qre.ctx, keys)
- rows := make([][]sqltypes.Value, 0, len(pkRows))
- missingRows := make([][]sqltypes.Value, 0, len(pkRows))
- var hits, absent, misses int64
- for i, pk := range pkRows {
- rcresult := rcresults[keys[i]]
- if rcresult.Row != nil {
- if qre.mustVerify() {
- err := qre.spotCheck(rcresult, pk)
- if err != nil {
- return nil, err
- }
- }
- rows = append(rows, applyFilter(qre.plan.ColumnNumbers, rcresult.Row))
- hits++
- } else {
- missingRows = append(missingRows, pk)
- }
- }
- if len(missingRows) != 0 {
- bv := map[string]interface{}{
- "#pk": sqlparser.TupleEqualityList{
- Columns: cistring.ToStrings(qre.plan.TableInfo.Indexes[0].Columns),
- Rows: missingRows,
- },
- }
- resultFromdb, err := qre.qFetch(qre.logStats, qre.plan.OuterQuery, bv)
- if err != nil {
- return nil, err
- }
- misses = int64(len(resultFromdb.Rows))
- absent = int64(len(pkRows)) - hits - misses
- for _, row := range resultFromdb.Rows {
- rows = append(rows, applyFilter(qre.plan.ColumnNumbers, row))
- key := buildKey(applyFilter(qre.plan.TableInfo.PKColumns, row))
- tableInfo.Cache.Set(qre.ctx, key, row, rcresults[key].Cas)
- }
- }
-
- qre.logStats.CacheHits = hits
- qre.logStats.CacheAbsent = absent
- qre.logStats.CacheMisses = misses
-
- qre.logStats.QuerySources |= QuerySourceRowcache
-
- tableInfo.hits.Add(hits)
- tableInfo.absent.Add(absent)
- tableInfo.misses.Add(misses)
- result.RowsAffected = uint64(len(rows))
- result.Rows = rows
- // limit == 0 is already addressed upfront.
- if limit > 0 && len(result.Rows) > int(limit) {
- result.Rows = result.Rows[:limit]
- result.RowsAffected = uint64(limit)
- }
- return result, nil
-}
-
-func (qre *QueryExecutor) mustVerify() bool {
- return (Rand() % spotCheckMultiplier) < qre.qe.spotCheckFreq.Get()
-}
-
-func (qre *QueryExecutor) spotCheck(rcresult RCResult, pk []sqltypes.Value) error {
- qre.qe.queryServiceStats.SpotCheckCount.Add(1)
- bv := map[string]interface{}{
- "#pk": sqlparser.TupleEqualityList{
- Columns: cistring.ToStrings(qre.plan.TableInfo.Indexes[0].Columns),
- Rows: [][]sqltypes.Value{pk},
- },
- }
- resultFromdb, err := qre.qFetch(qre.logStats, qre.plan.OuterQuery, bv)
- if err != nil {
- return err
- }
- var dbrow []sqltypes.Value
- if len(resultFromdb.Rows) != 0 {
- dbrow = resultFromdb.Rows[0]
- }
- if dbrow == nil || !rowsAreEqual(rcresult.Row, dbrow) {
- qre.qe.Launch(func() { qre.recheckLater(rcresult, dbrow, pk) })
- }
- return nil
-}
-
-func (qre *QueryExecutor) recheckLater(rcresult RCResult, dbrow []sqltypes.Value, pk []sqltypes.Value) {
- time.Sleep(10 * time.Second)
- keys := make([]string, 1)
- keys[0] = buildKey(pk)
- reloaded := qre.plan.TableInfo.Cache.Get(context.Background(), keys)[keys[0]]
- // If reloaded row is absent or has changed, we're good
- if reloaded.Row == nil || reloaded.Cas != rcresult.Cas {
- return
- }
- log.Warningf("query: %v", qre.plan.FullQuery)
- log.Warningf("mismatch for: %v\ncache: %v\ndb: %v", pk, rcresult.Row, dbrow)
- qre.qe.queryServiceStats.InternalErrors.Add("Mismatch", 1)
-}
-
// execDirect always sends the query to mysql
func (qre *QueryExecutor) execDirect(conn poolConn) (*sqltypes.Result, error) {
if qre.plan.Fields != nil {
@@ -592,7 +445,7 @@ func (qre *QueryExecutor) execInsertPKRows(conn poolConn, pkRows [][]sqltypes.Va
return qre.directFetch(conn, qre.plan.OuterQuery, qre.bindVars, bsc)
}
-func (qre *QueryExecutor) execUpsertPK(conn poolConn, invalidator CacheInvalidator) (*sqltypes.Result, error) {
+func (qre *QueryExecutor) execUpsertPK(conn poolConn) (*sqltypes.Result, error) {
pkRows, err := buildValueList(qre.plan.TableInfo, qre.plan.PKValues, qre.bindVars)
if err != nil {
return nil, err
@@ -615,7 +468,7 @@ func (qre *QueryExecutor) execUpsertPK(conn poolConn, invalidator CacheInvalidat
}
// At this point, we know the insert failed due to a duplicate pk row.
// So, we just update the row.
- result, err = qre.execDMLPKRows(conn, qre.plan.UpsertQuery, pkRows, invalidator)
+ result, err = qre.execDMLPKRows(conn, qre.plan.UpsertQuery, pkRows)
if err != nil {
return nil, err
}
@@ -626,23 +479,23 @@ func (qre *QueryExecutor) execUpsertPK(conn poolConn, invalidator CacheInvalidat
return result, err
}
-func (qre *QueryExecutor) execDMLPK(conn poolConn, invalidator CacheInvalidator) (*sqltypes.Result, error) {
+func (qre *QueryExecutor) execDMLPK(conn poolConn) (*sqltypes.Result, error) {
pkRows, err := buildValueList(qre.plan.TableInfo, qre.plan.PKValues, qre.bindVars)
if err != nil {
return nil, err
}
- return qre.execDMLPKRows(conn, qre.plan.OuterQuery, pkRows, invalidator)
+ return qre.execDMLPKRows(conn, qre.plan.OuterQuery, pkRows)
}
-func (qre *QueryExecutor) execDMLSubquery(conn poolConn, invalidator CacheInvalidator) (*sqltypes.Result, error) {
+func (qre *QueryExecutor) execDMLSubquery(conn poolConn) (*sqltypes.Result, error) {
innerResult, err := qre.directFetch(conn, qre.plan.Subquery, qre.bindVars, nil)
if err != nil {
return nil, err
}
- return qre.execDMLPKRows(conn, qre.plan.OuterQuery, innerResult.Rows, invalidator)
+ return qre.execDMLPKRows(conn, qre.plan.OuterQuery, innerResult.Rows)
}
-func (qre *QueryExecutor) execDMLPKRows(conn poolConn, query *sqlparser.ParsedQuery, pkRows [][]sqltypes.Value, invalidator CacheInvalidator) (*sqltypes.Result, error) {
+func (qre *QueryExecutor) execDMLPKRows(conn poolConn, query *sqlparser.ParsedQuery, pkRows [][]sqltypes.Value) (*sqltypes.Result, error) {
if len(pkRows) == 0 {
return &sqltypes.Result{RowsAffected: 0}, nil
}
@@ -675,13 +528,6 @@ func (qre *QueryExecutor) execDMLPKRows(conn poolConn, query *sqlparser.ParsedQu
// DMLs should only return RowsAffected.
result.RowsAffected += r.RowsAffected
}
- if invalidator == nil {
- return result, nil
- }
- for _, pk := range pkRows {
- key := buildKey(pk)
- invalidator.Delete(key)
- }
return result, nil
}
@@ -694,48 +540,6 @@ func (qre *QueryExecutor) execSet() (*sqltypes.Result, error) {
return qre.directFetch(conn, qre.plan.FullQuery, qre.bindVars, nil)
}
-func parseInt64(v interface{}) (int64, error) {
- if ival, ok := v.(int64); ok {
- return ival, nil
- }
- return -1, NewTabletError(vtrpcpb.ErrorCode_BAD_INPUT, "got %v, want int64", v)
-}
-
-func parseFloat64(v interface{}) (float64, error) {
- if ival, ok := v.(int64); ok {
- return float64(ival), nil
- }
- if fval, ok := v.(float64); ok {
- return fval, nil
- }
- return -1, NewTabletError(vtrpcpb.ErrorCode_BAD_INPUT, "got %v, want int64 or float64", v)
-}
-
-func parseDuration(v interface{}) (time.Duration, error) {
- val, err := parseFloat64(v)
- if err != nil {
- return 0, err
- }
- // time.Duration is an int64, have to multiple by 1e9 because
- // val might be in range (0, 1)
- return time.Duration(val * 1e9), nil
-}
-
-func rowsAreEqual(row1, row2 []sqltypes.Value) bool {
- if len(row1) != len(row2) {
- return false
- }
- for i := 0; i < len(row1); i++ {
- if row1[i].IsNull() && row2[i].IsNull() {
- continue
- }
- if (row1[i].IsNull() && !row2[i].IsNull()) || (!row1[i].IsNull() && row2[i].IsNull()) || row1[i].String() != row2[i].String() {
- return false
- }
- }
- return true
-}
-
func (qre *QueryExecutor) getConn(pool *ConnPool) (*DBConn, error) {
span := trace.NewSpanFromContext(qre.ctx)
span.StartLocal("QueryExecutor.getConn")
diff --git a/go/vt/tabletserver/query_executor_test.go b/go/vt/tabletserver/query_executor_test.go
index 9152c138a04..0923ae24931 100644
--- a/go/vt/tabletserver/query_executor_test.go
+++ b/go/vt/tabletserver/query_executor_test.go
@@ -20,7 +20,6 @@ import (
"github.com/youtube/vitess/go/vt/callinfo"
"github.com/youtube/vitess/go/vt/tableacl"
"github.com/youtube/vitess/go/vt/tableacl/simpleacl"
- "github.com/youtube/vitess/go/vt/tabletserver/fakecacheservice"
"github.com/youtube/vitess/go/vt/tabletserver/planbuilder"
"github.com/youtube/vitess/go/vt/vttest/fakesqldb"
@@ -38,7 +37,7 @@ func TestQueryExecutorPlanDDL(t *testing.T) {
}
db.AddQuery(query, want)
ctx := context.Background()
- tsv := newTestTabletServer(ctx, enableRowCache|enableStrict, db)
+ tsv := newTestTabletServer(ctx, enableStrict, db)
qre := newTestQueryExecutor(ctx, tsv, query, 0)
defer tsv.StopService()
checkPlanID(t, planbuilder.PlanDDL, qre.plan.PlanID)
@@ -74,7 +73,7 @@ func TestQueryExecutorPlanPassDmlStrictMode(t *testing.T) {
tsv.StopService()
// strict mode
- tsv = newTestTabletServer(ctx, enableRowCache|enableStrict, db)
+ tsv = newTestTabletServer(ctx, enableStrict, db)
qre = newTestQueryExecutor(ctx, tsv, query, newTransaction(tsv))
defer tsv.StopService()
defer testCommitHelper(t, tsv, qre)
@@ -115,7 +114,7 @@ func TestQueryExecutorPlanPassDmlStrictModeAutoCommit(t *testing.T) {
// strict mode
// update should fail because strict mode is not enabled
- tsv = newTestTabletServer(ctx, enableRowCache|enableStrict, db)
+ tsv = newTestTabletServer(ctx, enableStrict, db)
qre = newTestQueryExecutor(ctx, tsv, query, 0)
defer tsv.StopService()
checkPlanID(t, planbuilder.PlanPassDML, qre.plan.PlanID)
@@ -140,7 +139,7 @@ func TestQueryExecutorPlanInsertPk(t *testing.T) {
}
query := "insert into test_table values(1)"
ctx := context.Background()
- tsv := newTestTabletServer(ctx, enableRowCache|enableStrict, db)
+ tsv := newTestTabletServer(ctx, enableStrict, db)
qre := newTestQueryExecutor(ctx, tsv, query, 0)
defer tsv.StopService()
checkPlanID(t, planbuilder.PlanInsertPK, qre.plan.PlanID)
@@ -172,7 +171,7 @@ func TestQueryExecutorPlanInsertSubQueryAutoCommmit(t *testing.T) {
db.AddQuery(insertQuery, &sqltypes.Result{})
ctx := context.Background()
- tsv := newTestTabletServer(ctx, enableRowCache|enableStrict, db)
+ tsv := newTestTabletServer(ctx, enableStrict, db)
qre := newTestQueryExecutor(ctx, tsv, query, 0)
defer tsv.StopService()
checkPlanID(t, planbuilder.PlanInsertSubquery, qre.plan.PlanID)
@@ -204,7 +203,7 @@ func TestQueryExecutorPlanInsertSubQuery(t *testing.T) {
db.AddQuery(insertQuery, &sqltypes.Result{})
ctx := context.Background()
- tsv := newTestTabletServer(ctx, enableRowCache|enableStrict, db)
+ tsv := newTestTabletServer(ctx, enableStrict, db)
qre := newTestQueryExecutor(ctx, tsv, query, newTransaction(tsv))
defer tsv.StopService()
@@ -227,7 +226,7 @@ func TestQueryExecutorPlanUpsertPk(t *testing.T) {
}
query := "insert into test_table values(1) on duplicate key update val=1"
ctx := context.Background()
- tsv := newTestTabletServer(ctx, enableRowCache|enableStrict, db)
+ tsv := newTestTabletServer(ctx, enableStrict, db)
qre := newTestQueryExecutor(ctx, tsv, query, 0)
defer tsv.StopService()
checkPlanID(t, planbuilder.PlanUpsertPK, qre.plan.PlanID)
@@ -283,7 +282,7 @@ func TestQueryExecutorPlanDmlPk(t *testing.T) {
want := &sqltypes.Result{}
db.AddQuery(query, want)
ctx := context.Background()
- tsv := newTestTabletServer(ctx, enableRowCache|enableStrict, db)
+ tsv := newTestTabletServer(ctx, enableStrict, db)
qre := newTestQueryExecutor(ctx, tsv, query, newTransaction(tsv))
defer tsv.StopService()
defer testCommitHelper(t, tsv, qre)
@@ -303,7 +302,7 @@ func TestQueryExecutorPlanDmlAutoCommit(t *testing.T) {
want := &sqltypes.Result{}
db.AddQuery(query, want)
ctx := context.Background()
- tsv := newTestTabletServer(ctx, enableRowCache|enableStrict, db)
+ tsv := newTestTabletServer(ctx, enableStrict, db)
qre := newTestQueryExecutor(ctx, tsv, query, 0)
defer tsv.StopService()
checkPlanID(t, planbuilder.PlanDMLPK, qre.plan.PlanID)
@@ -324,7 +323,7 @@ func TestQueryExecutorPlanDmlSubQuery(t *testing.T) {
db.AddQuery(query, want)
db.AddQuery(expandedQuery, want)
ctx := context.Background()
- tsv := newTestTabletServer(ctx, enableRowCache|enableStrict, db)
+ tsv := newTestTabletServer(ctx, enableStrict, db)
qre := newTestQueryExecutor(ctx, tsv, query, newTransaction(tsv))
defer tsv.StopService()
defer testCommitHelper(t, tsv, qre)
@@ -346,7 +345,7 @@ func TestQueryExecutorPlanDmlSubQueryAutoCommit(t *testing.T) {
db.AddQuery(query, want)
db.AddQuery(expandedQuery, want)
ctx := context.Background()
- tsv := newTestTabletServer(ctx, enableRowCache|enableStrict, db)
+ tsv := newTestTabletServer(ctx, enableStrict, db)
qre := newTestQueryExecutor(ctx, tsv, query, 0)
defer tsv.StopService()
checkPlanID(t, planbuilder.PlanDMLSubquery, qre.plan.PlanID)
@@ -369,7 +368,7 @@ func TestQueryExecutorPlanOtherWithinATransaction(t *testing.T) {
}
db.AddQuery(query, want)
ctx := context.Background()
- tsv := newTestTabletServer(ctx, enableRowCache|enableSchemaOverrides|enableStrict, db)
+ tsv := newTestTabletServer(ctx, enableStrict, db)
qre := newTestQueryExecutor(ctx, tsv, query, newTransaction(tsv))
defer tsv.StopService()
defer testCommitHelper(t, tsv, qre)
@@ -427,10 +426,10 @@ func TestQueryExecutorPlanPassSelectWithLockOutsideATransaction(t *testing.T) {
Fields: getTestTableFields(),
})
ctx := context.Background()
- tsv := newTestTabletServer(ctx, enableRowCache|enableSchemaOverrides|enableStrict, db)
+ tsv := newTestTabletServer(ctx, enableStrict, db)
qre := newTestQueryExecutor(ctx, tsv, query, 0)
defer tsv.StopService()
- checkPlanID(t, planbuilder.PlanPassSelect, qre.plan.PlanID)
+ checkPlanID(t, planbuilder.PlanSelectLock, qre.plan.PlanID)
_, err := qre.Execute()
if err == nil {
t.Fatal("got: nil, want: error")
@@ -456,7 +455,7 @@ func TestQueryExecutorPlanPassSelect(t *testing.T) {
Fields: getTestTableFields(),
})
ctx := context.Background()
- tsv := newTestTabletServer(ctx, enableRowCache|enableSchemaOverrides|enableStrict, db)
+ tsv := newTestTabletServer(ctx, enableStrict, db)
qre := newTestQueryExecutor(ctx, tsv, query, 0)
defer tsv.StopService()
checkPlanID(t, planbuilder.PlanPassSelect, qre.plan.PlanID)
@@ -469,98 +468,12 @@ func TestQueryExecutorPlanPassSelect(t *testing.T) {
}
}
-func TestQueryExecutorPlanPKIn(t *testing.T) {
- db := setUpQueryExecutorTest()
- query := "select * from test_table where pk in (1, 2, 3) limit 1000"
- expandedQuery := "select pk, name, addr from test_table where pk in (1, 2, 3)"
- want := &sqltypes.Result{
- Fields: getTestTableFields(),
- RowsAffected: 1,
- Rows: [][]sqltypes.Value{
- {
- sqltypes.MakeTrusted(sqltypes.Int32, []byte("1")),
- sqltypes.MakeTrusted(sqltypes.Int32, []byte("20")),
- sqltypes.MakeTrusted(sqltypes.Int32, []byte("30")),
- },
- },
- }
- db.AddQuery(query, want)
- db.AddQuery(expandedQuery, want)
- db.AddQuery("select * from test_table where 1 != 1", &sqltypes.Result{
- Fields: getTestTableFields(),
- })
- ctx := context.Background()
- tsv := newTestTabletServer(ctx, enableRowCache|enableSchemaOverrides|enableStrict, db)
- qre := newTestQueryExecutor(ctx, tsv, query, 0)
- defer tsv.StopService()
- checkPlanID(t, planbuilder.PlanPKIn, qre.plan.PlanID)
- got, err := qre.Execute()
- if err != nil {
- t.Fatalf("qre.Execute() = %v, want nil", err)
- }
- if !reflect.DeepEqual(got, want) {
- t.Fatalf("got: %v, want: %v", got, want)
- }
-
- cachedQuery := "select pk, name, addr from test_table where pk in (1)"
- db.AddQuery(cachedQuery, &sqltypes.Result{
- Fields: getTestTableFields(),
- RowsAffected: 1,
- Rows: [][]sqltypes.Value{
- {
- sqltypes.MakeTrusted(sqltypes.Int32, []byte("1")),
- sqltypes.MakeTrusted(sqltypes.Int32, []byte("20")),
- sqltypes.MakeTrusted(sqltypes.Int32, []byte("30")),
- },
- },
- })
-
- nonCachedQuery := "select pk, name, addr from test_table where pk in (2, 3)"
- db.AddQuery(nonCachedQuery, &sqltypes.Result{})
- db.AddQuery(cachedQuery, want)
- // run again, this time pk=1 should hit the rowcache
- got, err = qre.Execute()
- if err != nil {
- t.Fatalf("qre.Execute() = %v, want nil", err)
- }
- if !reflect.DeepEqual(got, want) {
- t.Fatalf("got: %v, want: %v", got, want)
- }
-}
-
-func TestQueryExecutorPlanSelectSubQuery(t *testing.T) {
- db := setUpQueryExecutorTest()
- query := "select * from test_table where name = 1 limit 1000"
- expandedQuery := "select pk from test_table use index (`index`) where name = 1 limit 1000"
- want := &sqltypes.Result{
- Fields: getTestTableFields(),
- }
- db.AddQuery(query, want)
- db.AddQuery(expandedQuery, want)
-
- db.AddQuery("select * from test_table where 1 != 1", &sqltypes.Result{
- Fields: getTestTableFields(),
- })
- ctx := context.Background()
- tsv := newTestTabletServer(ctx, enableRowCache|enableSchemaOverrides|enableStrict, db)
- qre := newTestQueryExecutor(ctx, tsv, query, 0)
- defer tsv.StopService()
- checkPlanID(t, planbuilder.PlanSelectSubquery, qre.plan.PlanID)
- got, err := qre.Execute()
- if err != nil {
- t.Fatalf("qre.Execute() = %v, want nil", err)
- }
- if !reflect.DeepEqual(got, want) {
- t.Fatalf("got: %v, want: %v", got, want)
- }
-}
-
func TestQueryExecutorPlanSet(t *testing.T) {
db := setUpQueryExecutorTest()
setQuery := "set unknown_key = 1"
db.AddQuery(setQuery, &sqltypes.Result{})
ctx := context.Background()
- tsv := newTestTabletServer(ctx, enableRowCache|enableStrict, db)
+ tsv := newTestTabletServer(ctx, enableStrict, db)
defer tsv.StopService()
qre := newTestQueryExecutor(ctx, tsv, setQuery, 0)
checkPlanID(t, planbuilder.PlanSet, qre.plan.PlanID)
@@ -588,7 +501,7 @@ func TestQueryExecutorPlanOther(t *testing.T) {
}
db.AddQuery(query, want)
ctx := context.Background()
- tsv := newTestTabletServer(ctx, enableRowCache|enableSchemaOverrides|enableStrict, db)
+ tsv := newTestTabletServer(ctx, enableStrict, db)
qre := newTestQueryExecutor(ctx, tsv, query, 0)
defer tsv.StopService()
checkPlanID(t, planbuilder.PlanOther, qre.plan.PlanID)
@@ -615,7 +528,7 @@ func TestQueryExecutorPlanNextval(t *testing.T) {
updateQuery := "update `seq` set next_id = 7 where id = 0"
db.AddQuery(updateQuery, &sqltypes.Result{})
ctx := context.Background()
- tsv := newTestTabletServer(ctx, enableRowCache|enableStrict, db)
+ tsv := newTestTabletServer(ctx, enableStrict, db)
defer tsv.StopService()
qre := newTestQueryExecutor(ctx, tsv, "select next value from seq", 0)
checkPlanID(t, planbuilder.PlanNextval, qre.plan.PlanID)
@@ -670,7 +583,7 @@ func TestQueryExecutorTableAcl(t *testing.T) {
t.Fatalf("unable to load tableacl config, error: %v", err)
}
- tsv := newTestTabletServer(ctx, enableRowCache|enableSchemaOverrides|enableStrict, db)
+ tsv := newTestTabletServer(ctx, enableStrict, db)
qre := newTestQueryExecutor(ctx, tsv, query, 0)
defer tsv.StopService()
checkPlanID(t, planbuilder.PlanPassSelect, qre.plan.PlanID)
@@ -716,7 +629,7 @@ func TestQueryExecutorTableAclNoPermission(t *testing.T) {
t.Fatalf("unable to load tableacl config, error: %v", err)
}
// without enabling Config.StrictTableAcl
- tsv := newTestTabletServer(ctx, enableRowCache|enableSchemaOverrides|enableStrict, db)
+ tsv := newTestTabletServer(ctx, enableStrict, db)
qre := newTestQueryExecutor(ctx, tsv, query, 0)
checkPlanID(t, planbuilder.PlanPassSelect, qre.plan.PlanID)
got, err := qre.Execute()
@@ -729,7 +642,7 @@ func TestQueryExecutorTableAclNoPermission(t *testing.T) {
tsv.StopService()
// enable Config.StrictTableAcl
- tsv = newTestTabletServer(ctx, enableRowCache|enableSchemaOverrides|enableStrict|enableStrictTableAcl, db)
+ tsv = newTestTabletServer(ctx, enableStrict|enableStrictTableAcl, db)
qre = newTestQueryExecutor(ctx, tsv, query, 0)
defer tsv.StopService()
checkPlanID(t, planbuilder.PlanPassSelect, qre.plan.PlanID)
@@ -782,7 +695,7 @@ func TestQueryExecutorTableAclExemptACL(t *testing.T) {
}
// enable Config.StrictTableAcl
- tsv := newTestTabletServer(ctx, enableRowCache|enableSchemaOverrides|enableStrict|enableStrictTableAcl, db)
+ tsv := newTestTabletServer(ctx, enableStrict|enableStrictTableAcl, db)
qre := newTestQueryExecutor(ctx, tsv, query, 0)
defer tsv.StopService()
checkPlanID(t, planbuilder.PlanPassSelect, qre.plan.PlanID)
@@ -861,7 +774,7 @@ func TestQueryExecutorTableAclDryRun(t *testing.T) {
username,
}, ".")
// enable Config.StrictTableAcl
- tsv := newTestTabletServer(ctx, enableRowCache|enableSchemaOverrides|enableStrict|enableStrictTableAcl, db)
+ tsv := newTestTabletServer(ctx, enableStrict|enableStrictTableAcl, db)
tsv.qe.enableTableAclDryRun = true
qre := newTestQueryExecutor(ctx, tsv, query, 0)
defer tsv.StopService()
@@ -899,7 +812,7 @@ func TestQueryExecutorBlacklistQRFail(t *testing.T) {
alterRule.SetIPCond(bannedAddr)
alterRule.SetUserCond(bannedUser)
alterRule.SetQueryCond("select.*")
- alterRule.AddPlanCond(planbuilder.PlanSelectSubquery)
+ alterRule.AddPlanCond(planbuilder.PlanPassSelect)
alterRule.AddTableCond("test_table")
rulesName := "blacklistedRulesQRFail"
@@ -911,7 +824,7 @@ func TestQueryExecutorBlacklistQRFail(t *testing.T) {
username: bannedUser,
}
ctx := callinfo.NewContext(context.Background(), callInfo)
- tsv := newTestTabletServer(ctx, enableRowCache|enableStrict, db)
+ tsv := newTestTabletServer(ctx, enableStrict, db)
tsv.qe.schemaInfo.queryRuleSources.UnRegisterQueryRuleSource(rulesName)
tsv.qe.schemaInfo.queryRuleSources.RegisterQueryRuleSource(rulesName)
defer tsv.qe.schemaInfo.queryRuleSources.UnRegisterQueryRuleSource(rulesName)
@@ -923,7 +836,7 @@ func TestQueryExecutorBlacklistQRFail(t *testing.T) {
qre := newTestQueryExecutor(ctx, tsv, query, 0)
defer tsv.StopService()
- checkPlanID(t, planbuilder.PlanSelectSubquery, qre.plan.PlanID)
+ checkPlanID(t, planbuilder.PlanPassSelect, qre.plan.PlanID)
// execute should fail because query has been blacklisted
_, err := qre.Execute()
if err == nil {
@@ -959,7 +872,7 @@ func TestQueryExecutorBlacklistQRRetry(t *testing.T) {
alterRule.SetIPCond(bannedAddr)
alterRule.SetUserCond(bannedUser)
alterRule.SetQueryCond("select.*")
- alterRule.AddPlanCond(planbuilder.PlanSelectSubquery)
+ alterRule.AddPlanCond(planbuilder.PlanPassSelect)
alterRule.AddTableCond("test_table")
rulesName := "blacklistedRulesQRRetry"
@@ -971,7 +884,7 @@ func TestQueryExecutorBlacklistQRRetry(t *testing.T) {
username: bannedUser,
}
ctx := callinfo.NewContext(context.Background(), callInfo)
- tsv := newTestTabletServer(ctx, enableRowCache|enableStrict, db)
+ tsv := newTestTabletServer(ctx, enableStrict, db)
tsv.qe.schemaInfo.queryRuleSources.UnRegisterQueryRuleSource(rulesName)
tsv.qe.schemaInfo.queryRuleSources.RegisterQueryRuleSource(rulesName)
defer tsv.qe.schemaInfo.queryRuleSources.UnRegisterQueryRuleSource(rulesName)
@@ -983,7 +896,7 @@ func TestQueryExecutorBlacklistQRRetry(t *testing.T) {
qre := newTestQueryExecutor(ctx, tsv, query, 0)
defer tsv.StopService()
- checkPlanID(t, planbuilder.PlanSelectSubquery, qre.plan.PlanID)
+ checkPlanID(t, planbuilder.PlanPassSelect, qre.plan.PlanID)
_, err := qre.Execute()
if err == nil {
t.Fatal("got: nil, want: error")
@@ -1000,10 +913,8 @@ func TestQueryExecutorBlacklistQRRetry(t *testing.T) {
type executorFlags int64
const (
- noFlags executorFlags = iota
- enableRowCache = 1 << iota
- enableSchemaOverrides
- enableStrict
+ noFlags executorFlags = 0
+ enableStrict = 1 << iota
enableStrictTableAcl
)
@@ -1013,11 +924,9 @@ func newTestTabletServer(ctx context.Context, flags executorFlags, db *fakesqldb
config := DefaultQsConfig
config.StatsPrefix = fmt.Sprintf("Stats-%d-", randID)
config.DebugURLPrefix = fmt.Sprintf("/debug-%d-", randID)
- config.RowCache.StatsPrefix = fmt.Sprintf("Stats-%d-", randID)
config.PoolNamePrefix = fmt.Sprintf("Pool-%d-", randID)
config.PoolSize = 100
config.TransactionCap = 100
- config.SpotCheckRatio = 1.0
config.EnablePublishStats = false
config.EnableAutoCommit = true
@@ -1026,11 +935,6 @@ func newTestTabletServer(ctx context.Context, flags executorFlags, db *fakesqldb
} else {
config.StrictMode = false
}
- if flags&enableRowCache > 0 {
- config.RowCache.Enabled = true
- config.RowCache.Binary = "ls"
- config.RowCache.Connections = 100
- }
if flags&enableStrictTableAcl > 0 {
config.StrictTableAcl = true
} else {
@@ -1039,12 +943,8 @@ func newTestTabletServer(ctx context.Context, flags executorFlags, db *fakesqldb
tsv := NewTabletServer(config)
testUtils := newTestUtils()
dbconfigs := testUtils.newDBConfigs(db)
- schemaOverrides := []SchemaOverride{}
- if flags&enableSchemaOverrides > 0 {
- schemaOverrides = getTestTableSchemaOverrides()
- }
target := querypb.Target{TabletType: topodatapb.TabletType_MASTER}
- tsv.StartService(target, dbconfigs, schemaOverrides, testUtils.newMysqld(&dbconfigs))
+ tsv.StartService(target, dbconfigs, testUtils.newMysqld(&dbconfigs))
return tsv
}
@@ -1076,7 +976,6 @@ func testCommitHelper(t *testing.T, tsv *TabletServer, queryExecutor *QueryExecu
}
func setUpQueryExecutorTest() *fakesqldb.DB {
- fakecacheservice.Register()
db := fakesqldb.Register()
initQueryExecutorTestDB(db)
return db
@@ -1106,22 +1005,6 @@ func checkPlanID(
}
}
-func getTestTableSchemaOverrides() []SchemaOverride {
- return []SchemaOverride{
- {
- Name: "test_table",
- PKColumns: []string{"pk"},
- Cache: &struct {
- Type string
- Table string
- }{
- Type: "RW",
- Table: "test_table",
- },
- },
- }
-}
-
func getQueryExecutorSupportedQueries() map[string]*sqltypes.Result {
return map[string]*sqltypes.Result{
// queries for schema info
diff --git a/go/vt/tabletserver/query_rules_test.go b/go/vt/tabletserver/query_rules_test.go
index 7bbcf89d0bc..623ef8e0a11 100644
--- a/go/vt/tabletserver/query_rules_test.go
+++ b/go/vt/tabletserver/query_rules_test.go
@@ -99,7 +99,7 @@ func TestFilterByPlan(t *testing.T) {
qr2 := NewQueryRule("rule 2", "r2", QRFail)
qr2.AddPlanCond(planbuilder.PlanPassSelect)
- qr2.AddPlanCond(planbuilder.PlanPKIn)
+ qr2.AddPlanCond(planbuilder.PlanSelectLock)
qr2.AddBindVarCond("a", true, false, QRNoOp, nil)
qr3 := NewQueryRule("rule 3", "r3", QRFail)
@@ -166,7 +166,7 @@ func TestFilterByPlan(t *testing.T) {
t.Errorf("qrs1:\n%s, want\n%s", got, want)
}
- qrs1 = qrs.filterByPlan("insert", planbuilder.PlanPKIn, "a")
+ qrs1 = qrs.filterByPlan("insert", planbuilder.PlanSelectLock, "a")
got = marshalled(qrs1)
if got != want {
t.Errorf("qrs1:\n%s, want\n%s", got, want)
diff --git a/go/vt/tabletserver/query_service_stats.go b/go/vt/tabletserver/query_service_stats.go
index 658fcf6a802..c410487b48c 100644
--- a/go/vt/tabletserver/query_service_stats.go
+++ b/go/vt/tabletserver/query_service_stats.go
@@ -38,8 +38,6 @@ type QueryServiceStats struct {
QPSRates *stats.Rates
// ResultStats shows the histogram of number of rows returned.
ResultStats *stats.Histogram
- // SpotCheckCount shows the number of spot check events happened.
- SpotCheckCount *stats.Int
}
// NewQueryServiceStats returns a new QueryServiceStats instance.
@@ -53,7 +51,6 @@ func NewQueryServiceStats(statsPrefix string, enablePublishStats bool) *QuerySer
errorStatsName := ""
internalErrorsName := ""
resultStatsName := ""
- spotCheckCountName := ""
userTableQueryCountName := ""
userTableQueryTimesNsName := ""
userTransactionCountName := ""
@@ -68,7 +65,6 @@ func NewQueryServiceStats(statsPrefix string, enablePublishStats bool) *QuerySer
errorStatsName = statsPrefix + "Errors"
internalErrorsName = statsPrefix + "InternalErrors"
resultStatsName = statsPrefix + "Results"
- spotCheckCountName = statsPrefix + "RowcacheSpotCheckCount"
userTableQueryCountName = statsPrefix + "UserTableQueryCount"
userTableQueryTimesNsName = statsPrefix + "UserTableQueryTimesNs"
userTransactionCountName = statsPrefix + "UserTransactionCount"
@@ -83,7 +79,7 @@ func NewQueryServiceStats(statsPrefix string, enablePublishStats bool) *QuerySer
KillStats: stats.NewCounters(killStatsName, "Transactions", "Queries"),
InfoErrors: stats.NewCounters(infoErrorsName, "Retry", "Fatal", "DupKey"),
ErrorStats: stats.NewCounters(errorStatsName, "Fail", "TxPoolFull", "NotInTx", "Deadlock"),
- InternalErrors: stats.NewCounters(internalErrorsName, "Task", "MemcacheStats",
+ InternalErrors: stats.NewCounters(internalErrorsName, "Task",
"Mismatch", "StrayTransactions", "Invalidation", "Panic", "HungQuery", "Schema"),
UserTableQueryCount: stats.NewMultiCounters(
userTableQueryCountName, []string{"TableName", "CallerID", "Type"}),
@@ -94,8 +90,7 @@ func NewQueryServiceStats(statsPrefix string, enablePublishStats bool) *QuerySer
UserTransactionTimesNs: stats.NewMultiCounters(
userTransactionTimesNsName, []string{"CallerID", "Conclusion"}),
// Sample every 5 seconds and keep samples for up to 15 minutes.
- QPSRates: stats.NewRates(qpsRateName, queryStats, 15*60/5, 5*time.Second),
- ResultStats: stats.NewHistogram(resultStatsName, resultBuckets),
- SpotCheckCount: stats.NewInt(spotCheckCountName),
+ QPSRates: stats.NewRates(qpsRateName, queryStats, 15*60/5, 5*time.Second),
+ ResultStats: stats.NewHistogram(resultStatsName, resultBuckets),
}
}
diff --git a/go/vt/tabletserver/querylogz.go b/go/vt/tabletserver/querylogz.go
index df614434e99..3289a2feb82 100644
--- a/go/vt/tabletserver/querylogz.go
+++ b/go/vt/tabletserver/querylogz.go
@@ -35,10 +35,6 @@ var (
Sources |
RowsAffected |
Response Size |
- Cache Hits |
- Cache Misses |
- Cache Absent |
- Cache Invalidations |
Transaction ID |
Error |
@@ -66,10 +62,6 @@ var (
{{.FmtQuerySources}} |
{{.RowsAffected}} |
{{.SizeOfResponse}} |
- {{.CacheHits}} |
- {{.CacheMisses}} |
- {{.CacheAbsent}} |
- {{.CacheInvalidations}} |
{{.TransactionID}} |
{{.ErrorStr}} |
diff --git a/go/vt/tabletserver/querylogz_test.go b/go/vt/tabletserver/querylogz_test.go
index a77153214e2..c9aaa653c22 100644
--- a/go/vt/tabletserver/querylogz_test.go
+++ b/go/vt/tabletserver/querylogz_test.go
@@ -40,10 +40,6 @@ func TestQuerylogzHandler(t *testing.T) {
logStats.StartTime, _ = time.Parse("Jan 2 15:04:05", "Nov 29 13:33:09")
logStats.MysqlResponseTime = 1 * time.Millisecond
logStats.WaitingForConnection = 10 * time.Nanosecond
- logStats.CacheHits = 17
- logStats.CacheAbsent = 5
- logStats.CacheMisses = 2
- logStats.CacheInvalidations = 3
logStats.TransactionID = 131
logStats.ctx = callerid.NewContext(
context.Background(),
@@ -68,10 +64,6 @@ func TestQuerylogzHandler(t *testing.T) {
`none | `,
`1000 | `,
`0 | `,
- `17 | `,
- `2 | `,
- `5 | `,
- `3 | `,
`131 | `,
` | `,
}
@@ -101,10 +93,6 @@ func TestQuerylogzHandler(t *testing.T) {
`none | `,
`1000 | `,
`0 | `,
- `17 | `,
- `2 | `,
- `5 | `,
- `3 | `,
`131 | `,
` | `,
}
@@ -134,10 +122,6 @@ func TestQuerylogzHandler(t *testing.T) {
`none | `,
`1000 | `,
`0 | `,
- `17 | `,
- `2 | `,
- `5 | `,
- `3 | `,
`131 | `,
` | `,
}
diff --git a/go/vt/tabletserver/queryz_test.go b/go/vt/tabletserver/queryz_test.go
index 80d5033adef..2039c90b6d9 100644
--- a/go/vt/tabletserver/queryz_test.go
+++ b/go/vt/tabletserver/queryz_test.go
@@ -25,7 +25,7 @@ func TestQueryzHandler(t *testing.T) {
ExecPlan: &planbuilder.ExecPlan{
TableName: "test_table",
PlanID: planbuilder.PlanPassSelect,
- Reason: planbuilder.ReasonSelect,
+ Reason: planbuilder.ReasonTable,
},
}
plan1.AddStats(10, 1*time.Second, 2, 0)
@@ -59,7 +59,7 @@ func TestQueryzHandler(t *testing.T) {
`select name from test_table | `,
`test_table | `,
`PASS_SELECT | `,
- `SELECT | `,
+ `TABLE | `,
`10 | `,
`1.000000 | `,
`2 | `,
diff --git a/go/vt/tabletserver/rowcache.go b/go/vt/tabletserver/rowcache.go
deleted file mode 100644
index 399c223c6dd..00000000000
--- a/go/vt/tabletserver/rowcache.go
+++ /dev/null
@@ -1,177 +0,0 @@
-// Copyright 2012, Google Inc. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package tabletserver
-
-import (
- "encoding/binary"
- "strconv"
- "time"
-
- "github.com/youtube/vitess/go/sqltypes"
- "github.com/youtube/vitess/go/stats"
- vtrpcpb "github.com/youtube/vitess/go/vt/proto/vtrpc"
- "golang.org/x/net/context"
-)
-
-var cacheStats = stats.NewTimings("Rowcache")
-
-var pack = binary.BigEndian
-
-const (
- rcDeleted = 1
-
- // maxKeyLen is a value less than memcache's limit of 250.
- maxKeyLen = 200
-
- // maxDataLen prevents large rows from being inserted in rowcache.
- maxDataLen = 8000
-)
-
-// RowCache gives a table-level view into the rowcache.
-type RowCache struct {
- tableInfo *TableInfo
- prefix string
- cachePool *CachePool
-}
-
-// RCResult represents the result of a cache multi-fetch.
-type RCResult struct {
- Row []sqltypes.Value
- Cas uint64
-}
-
-// NewRowCache creates a new RowCache.
-func NewRowCache(tableInfo *TableInfo, cachePool *CachePool) *RowCache {
- prefix := strconv.FormatInt(cachePool.maxPrefix.Add(1), 36) + "."
- return &RowCache{tableInfo, prefix, cachePool}
-}
-
-// Get fetches the values for the specified keys.
-func (rc *RowCache) Get(ctx context.Context, keys []string) (results map[string]RCResult) {
- mkeys := make([]string, 0, len(keys))
- for _, key := range keys {
- if len(key) > maxKeyLen {
- continue
- }
- mkeys = append(mkeys, rc.prefix+key)
- }
- prefixlen := len(rc.prefix)
- conn := rc.cachePool.Get(ctx)
- // This is not the same as defer rc.cachePool.Put(conn)
- defer func() { rc.cachePool.Put(conn) }()
-
- defer cacheStats.Record("Exec", time.Now())
- mcresults, err := conn.Gets(mkeys...)
- if err != nil {
- conn.Close()
- conn = nil
- panic(NewTabletError(vtrpcpb.ErrorCode_INTERNAL_ERROR, "%s", err))
- }
- results = make(map[string]RCResult, len(mkeys))
- for _, mcresult := range mcresults {
- if mcresult.Flags == rcDeleted {
- // The row was recently invalidated.
- // If the caller reads the row from db, they can update it
- // back as long as it's not updated again.
- results[mcresult.Key[prefixlen:]] = RCResult{Cas: mcresult.Cas}
- continue
- }
- row := rc.decodeRow(mcresult.Value)
- if row == nil {
- panic(NewTabletError(vtrpcpb.ErrorCode_INTERNAL_ERROR, "Corrupt data for %s", mcresult.Key))
- }
- results[mcresult.Key[prefixlen:]] = RCResult{Row: row, Cas: mcresult.Cas}
- }
- return
-}
-
-// Set pushes the specified row into the rowcache.
-func (rc *RowCache) Set(ctx context.Context, key string, row []sqltypes.Value, cas uint64) {
- if len(key) > maxKeyLen {
- return
- }
- b := rc.encodeRow(row)
- if b == nil {
- return
- }
- conn := rc.cachePool.Get(ctx)
- defer func() { rc.cachePool.Put(conn) }()
- mkey := rc.prefix + key
-
- var err error
- if cas == 0 {
- // Either caller didn't find the value at all
- // or they didn't look for it in the first place.
- _, err = conn.Add(mkey, 0, 0, b)
- } else {
- // Caller is trying to update a row that recently changed.
- _, err = conn.Cas(mkey, 0, 0, b, cas)
- }
- if err != nil {
- conn.Close()
- conn = nil
- panic(NewTabletError(vtrpcpb.ErrorCode_INTERNAL_ERROR, "%s", err))
- }
-}
-
-// Delete marks the row as deleted.
-func (rc *RowCache) Delete(ctx context.Context, key string) {
- if len(key) > maxKeyLen {
- return
- }
- conn := rc.cachePool.Get(ctx)
- defer func() { rc.cachePool.Put(conn) }()
- mkey := rc.prefix + key
-
- _, err := conn.Set(mkey, rcDeleted, 0, nil)
- if err != nil {
- conn.Close()
- conn = nil
- panic(NewTabletError(vtrpcpb.ErrorCode_INTERNAL_ERROR, "%s", err))
- }
-}
-
-func (rc *RowCache) encodeRow(row []sqltypes.Value) (b []byte) {
- length := 0
- for _, v := range row {
- length += v.Len()
- if length > maxDataLen {
- return nil
- }
- }
- datastart := 4 + len(row)*4
- b = make([]byte, datastart+length)
- data := b[datastart:datastart]
- pack.PutUint32(b, uint32(len(row)))
- for i, v := range row {
- if v.IsNull() {
- pack.PutUint32(b[4+i*4:], 0xFFFFFFFF)
- continue
- }
- data = append(data, v.Raw()...)
- pack.PutUint32(b[4+i*4:], uint32(v.Len()))
- }
- return b
-}
-
-func (rc *RowCache) decodeRow(b []byte) (row []sqltypes.Value) {
- rowlen := pack.Uint32(b)
- data := b[4+rowlen*4:]
- row = make([]sqltypes.Value, rowlen)
- for i := range row {
- length := pack.Uint32(b[4+i*4:])
- if length == 0xFFFFFFFF {
- continue
- }
- if length > uint32(len(data)) {
- // Corrupt data
- return nil
- }
- // rowcache values are trusted.
- row[i] = sqltypes.MakeTrusted(rc.tableInfo.Columns[i].Type, data[:length])
- data = data[length:]
- }
- return row
-}
diff --git a/go/vt/tabletserver/rowcache_invalidator.go b/go/vt/tabletserver/rowcache_invalidator.go
deleted file mode 100644
index 202a8fc71ab..00000000000
--- a/go/vt/tabletserver/rowcache_invalidator.go
+++ /dev/null
@@ -1,261 +0,0 @@
-// Copyright 2012, Google Inc. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package tabletserver
-
-import (
- "fmt"
- "sync"
- "time"
-
- log "github.com/golang/glog"
- "github.com/youtube/vitess/go/sqltypes"
- "github.com/youtube/vitess/go/stats"
- "github.com/youtube/vitess/go/sync2"
- "github.com/youtube/vitess/go/tb"
- "github.com/youtube/vitess/go/vt/binlog"
- "github.com/youtube/vitess/go/vt/mysqlctl"
- "github.com/youtube/vitess/go/vt/mysqlctl/replication"
- vtrpcpb "github.com/youtube/vitess/go/vt/proto/vtrpc"
- "github.com/youtube/vitess/go/vt/sqlparser"
- "github.com/youtube/vitess/go/vt/tabletserver/planbuilder"
- "golang.org/x/net/context"
-
- binlogdatapb "github.com/youtube/vitess/go/vt/proto/binlogdata"
-)
-
-// RowcacheInvalidator runs the service to invalidate
-// the rowcache based on binlog events.
-type RowcacheInvalidator struct {
- qe *QueryEngine
- checker MySQLChecker
- dbname string
- mysqld mysqlctl.MysqlDaemon
-
- svm sync2.ServiceManager
-
- posMutex sync.Mutex
- pos replication.Position
- lagSeconds sync2.AtomicInt64
-}
-
-// AppendGTID updates the current replication position by appending a GTID to
-// the set of transactions that have been processed.
-func (rci *RowcacheInvalidator) AppendGTID(gtid replication.GTID) {
- rci.posMutex.Lock()
- defer rci.posMutex.Unlock()
- rci.pos = replication.AppendGTID(rci.pos, gtid)
-}
-
-// SetPosition sets the current ReplicationPosition.
-func (rci *RowcacheInvalidator) SetPosition(rp replication.Position) {
- rci.posMutex.Lock()
- defer rci.posMutex.Unlock()
- rci.pos = rp
-}
-
-// Position returns the current ReplicationPosition.
-func (rci *RowcacheInvalidator) Position() replication.Position {
- rci.posMutex.Lock()
- defer rci.posMutex.Unlock()
- return rci.pos
-}
-
-// PositionString returns the current ReplicationPosition as a string.
-func (rci *RowcacheInvalidator) PositionString() string {
- return rci.Position().String()
-}
-
-// NewRowcacheInvalidator creates a new RowcacheInvalidator.
-// Just like QueryEngine, this is a singleton class.
-// You must call this only once.
-func NewRowcacheInvalidator(statsPrefix string, checker MySQLChecker, qe *QueryEngine, enablePublishStats bool) *RowcacheInvalidator {
- rci := &RowcacheInvalidator{checker: checker, qe: qe}
- if enablePublishStats {
- stats.Publish(statsPrefix+"RowcacheInvalidatorState", stats.StringFunc(rci.svm.StateName))
- stats.Publish(statsPrefix+"RowcacheInvalidatorPosition", stats.StringFunc(rci.PositionString))
- stats.Publish(statsPrefix+"RowcacheInvalidatorLagSeconds", stats.IntFunc(rci.lagSeconds.Get))
- }
- return rci
-}
-
-// Open runs the invalidation loop.
-func (rci *RowcacheInvalidator) Open(dbname string, mysqld mysqlctl.MysqlDaemon) {
- // Perform an early check to see if we're already running.
- if rci.svm.State() == sync2.SERVICE_RUNNING {
- return
- }
- rp, err := mysqld.MasterPosition()
- if err != nil {
- panic(NewTabletError(vtrpcpb.ErrorCode_INTERNAL_ERROR, "Rowcache invalidator aborting: cannot determine replication position: %v", err))
- }
- if mysqld.Cnf().BinLogPath == "" {
- panic(NewTabletError(vtrpcpb.ErrorCode_INTERNAL_ERROR, "Rowcache invalidator aborting: binlog path not specified"))
- }
- err = rci.qe.ClearRowcache(context.Background())
- if err != nil {
- panic(NewTabletError(vtrpcpb.ErrorCode_INTERNAL_ERROR, "Rowcahe is not reachable"))
- }
-
- rci.dbname = dbname
- rci.mysqld = mysqld
- rci.SetPosition(rp)
-
- ok := rci.svm.Go(rci.run)
- if ok {
- log.Infof("Rowcache invalidator starting, dbname: %s, path: %s, position: %v", dbname, mysqld.Cnf().BinLogPath, rp)
- } else {
- log.Infof("Rowcache invalidator already running")
- }
-}
-
-// Close terminates the invalidation loop. It returns only of the
-// loop has terminated.
-func (rci *RowcacheInvalidator) Close() {
- rci.svm.Stop()
-}
-
-func (rci *RowcacheInvalidator) run(ctx *sync2.ServiceContext) error {
- for {
- evs := binlog.NewEventStreamer(rci.dbname, rci.mysqld, rci.Position(), rci.processEvent)
- // We wrap this code in a func so we can catch all panics.
- // If an error is returned, we log it, wait 1 second, and retry.
- // This loop can only be stopped by calling Close.
- err := func() (inner error) {
- defer func() {
- if x := recover(); x != nil {
- inner = fmt.Errorf("%v: uncaught panic:\n%s", x, tb.Stack(4))
- }
- }()
- return evs.Stream(ctx)
- }()
- if err == nil || !ctx.IsRunning() {
- break
- }
- if IsConnErr(err) {
- rci.checker.CheckMySQL()
- }
- log.Errorf("binlog.ServeUpdateStream returned err '%v', retrying in 1 second.", err.Error())
- rci.qe.queryServiceStats.InternalErrors.Add("Invalidation", 1)
- time.Sleep(1 * time.Second)
- }
- log.Infof("Rowcache invalidator stopped")
- return nil
-}
-
-func (rci *RowcacheInvalidator) handleInvalidationError(event *binlogdatapb.StreamEvent) {
- if x := recover(); x != nil {
- terr, ok := x.(*TabletError)
- if !ok {
- log.Errorf("Uncaught panic for %+v:\n%v\n%s", event, x, tb.Stack(4))
- rci.qe.queryServiceStats.InternalErrors.Add("Panic", 1)
- return
- }
- log.Errorf("%v: %+v", terr, event)
- rci.qe.queryServiceStats.InternalErrors.Add("Invalidation", 1)
- }
-}
-
-func (rci *RowcacheInvalidator) processEvent(event *binlogdatapb.StreamEvent) error {
- defer rci.handleInvalidationError(event)
- switch event.Category {
- case binlogdatapb.StreamEvent_SE_DDL:
- log.Infof("DDL invalidation: %s", event.Sql)
- rci.handleDDLEvent(event.Sql)
- case binlogdatapb.StreamEvent_SE_DML:
- rci.handleDMLEvent(event)
- case binlogdatapb.StreamEvent_SE_ERR:
- rci.handleUnrecognizedEvent(event.Sql)
- case binlogdatapb.StreamEvent_SE_POS:
- gtid, err := replication.DecodeGTID(event.TransactionId)
- if err != nil {
- return err
- }
- rci.AppendGTID(gtid)
- default:
- log.Errorf("unknown event: %#v", event)
- rci.qe.queryServiceStats.InternalErrors.Add("Invalidation", 1)
- return nil
- }
- rci.lagSeconds.Set(time.Now().Unix() - event.Timestamp)
- return nil
-}
-
-func (rci *RowcacheInvalidator) handleDMLEvent(event *binlogdatapb.StreamEvent) {
- invalidations := int64(0)
- tableInfo := rci.qe.schemaInfo.GetTable(event.TableName)
- if tableInfo == nil {
- panic(NewTabletError(vtrpcpb.ErrorCode_BAD_INPUT, "Table %s not found", event.TableName))
- }
- if !tableInfo.IsCached() {
- return
- }
-
- for _, pkTuple := range event.PrimaryKeyValues {
- // We can trust values coming from EventStreamer.
- row := sqltypes.MakeRowTrusted(event.PrimaryKeyFields, pkTuple)
- tableInfo.Cache.Delete(context.Background(), buildKey(row))
- invalidations++
- }
- tableInfo.invalidations.Add(invalidations)
-}
-
-func (rci *RowcacheInvalidator) handleDDLEvent(ddl string) {
- ddlPlan := planbuilder.DDLParse(ddl)
- if ddlPlan.Action == "" {
- panic(NewTabletError(vtrpcpb.ErrorCode_BAD_INPUT, "DDL is not understood"))
- }
- if ddlPlan.TableName != "" && ddlPlan.TableName != ddlPlan.NewName {
- // It's a drop or rename.
- rci.qe.schemaInfo.DropTable(ddlPlan.TableName)
- }
- if ddlPlan.NewName != "" {
- rci.qe.schemaInfo.CreateOrUpdateTable(context.Background(), ddlPlan.NewName)
- }
-}
-
-func (rci *RowcacheInvalidator) handleUnrecognizedEvent(sql string) {
- statement, err := sqlparser.Parse(sql)
- if err != nil {
- log.Errorf("Error: %v: %s", err, sql)
- rci.qe.queryServiceStats.InternalErrors.Add("Invalidation", 1)
- return
- }
- var table *sqlparser.TableName
- switch stmt := statement.(type) {
- case *sqlparser.Insert:
- // Inserts don't affect rowcache.
- return
- case *sqlparser.Update:
- table = stmt.Table
- case *sqlparser.Delete:
- table = stmt.Table
- default:
- log.Errorf("Unrecognized: %s", sql)
- rci.qe.queryServiceStats.InternalErrors.Add("Invalidation", 1)
- return
- }
-
- // Ignore cross-db statements.
- if table.Qualifier != "" && string(table.Qualifier) != rci.qe.dbconfigs.App.DbName {
- return
- }
-
- // Ignore if it's an uncached table.
- tableName := string(table.Name)
- tableInfo := rci.qe.schemaInfo.GetTable(tableName)
- if tableInfo == nil {
- log.Errorf("Table %s not found: %s", tableName, sql)
- rci.qe.queryServiceStats.InternalErrors.Add("Invalidation", 1)
- return
- }
- if !tableInfo.IsCached() {
- return
- }
-
- // Treat the statement as a DDL.
- // It will conservatively invalidate all rows of the table.
- log.Warningf("Treating '%s' as DDL for table %s", sql, tableName)
- rci.qe.schemaInfo.CreateOrUpdateTable(context.Background(), tableName)
-}
diff --git a/go/vt/tabletserver/schema_info.go b/go/vt/tabletserver/schema_info.go
index f0ccfc27d52..824a59e5e6d 100644
--- a/go/vt/tabletserver/schema_info.go
+++ b/go/vt/tabletserver/schema_info.go
@@ -36,7 +36,6 @@ const maxTableCount = 10000
const (
debugQueryPlansKey = "query_plans"
debugQueryStatsKey = "query_stats"
- debugTableStatsKey = "table_stats"
debugSchemaKey = "schema"
debugQueryRulesKey = "query_rules"
)
@@ -83,28 +82,11 @@ func (ep *ExecPlan) Stats() (queryCount int64, duration time.Duration, rowCount,
return
}
-// SchemaOverride is a way to specify how the schema loaded by SchemaInfo
-// must be overridden. Name is the name of the table, PKColumns specifies
-// the new prmiary keys. Cache.Type specifies the rowcache operation for
-// the table. It can be "R", which is read-only, or "RW" for read-write, and
-// Table specifies the rowcache table to operate on.
-// The purpose of this override is mainly to allow views to benefit from
-// the rowcache. It has its downsides. Use carefully.
-type SchemaOverride struct {
- Name string
- PKColumns []string
- Cache *struct {
- Type string
- Table string
- }
-}
-
// SchemaInfo stores the schema info and performs operations that
-// keep itself and the rowcache up-to-date.
+// keep itself up-to-date.
type SchemaInfo struct {
mu sync.Mutex
tables map[string]*TableInfo
- overrides []SchemaOverride
lastChange int64
reloadTime time.Duration
@@ -112,7 +94,6 @@ type SchemaInfo struct {
// their own synchronization.
queries *cache.LRUCache
connPool *ConnPool
- cachePool *CachePool
ticks *timer.Timer
endpoints map[string]string
queryRuleSources *QueryRuleInfo
@@ -126,14 +107,12 @@ func NewSchemaInfo(
queryCacheSize int,
reloadTime time.Duration,
idleTimeout time.Duration,
- cachePool *CachePool,
endpoints map[string]string,
enablePublishStats bool,
queryServiceStats *QueryServiceStats) *SchemaInfo {
si := &SchemaInfo{
queries: cache.NewLRUCache(int64(queryCacheSize)),
connPool: NewConnPool("", 3, idleTimeout, enablePublishStats, queryServiceStats, checker),
- cachePool: cachePool,
ticks: timer.NewTimer(reloadTime),
endpoints: endpoints,
reloadTime: reloadTime,
@@ -148,8 +127,6 @@ func NewSchemaInfo(
return fmt.Sprintf("%v", si.queries.Oldest())
}))
stats.Publish(statsPrefix+"SchemaReloadTime", stats.DurationFunc(si.ticks.Interval))
- _ = stats.NewMultiCountersFunc(statsPrefix+"RowcacheStats", []string{"Table", "Stats"}, si.getRowcacheStats)
- _ = stats.NewMultiCountersFunc(statsPrefix+"RowcacheInvalidations", []string{"Table"}, si.getRowcacheInvalidations)
_ = stats.NewMultiCountersFunc(statsPrefix+"QueryCounts", []string{"Table", "Plan"}, si.getQueryCount)
_ = stats.NewMultiCountersFunc(statsPrefix+"QueryTimesNs", []string{"Table", "Plan"}, si.getQueryTime)
_ = stats.NewMultiCountersFunc(statsPrefix+"QueryRowCounts", []string{"Table", "Plan"}, si.getQueryRowCount)
@@ -166,7 +143,7 @@ func NewSchemaInfo(
}
// Open initializes the current SchemaInfo for service by loading the necessary info from the specified database.
-func (si *SchemaInfo) Open(appParams, dbaParams *sqldb.ConnParams, schemaOverrides []SchemaOverride, strictMode bool) {
+func (si *SchemaInfo) Open(appParams, dbaParams *sqldb.ConnParams, strictMode bool) {
ctx := context.Background()
si.connPool.Open(appParams, dbaParams)
// Get time first because it needs a connection from the pool.
@@ -202,7 +179,6 @@ func (si *SchemaInfo) Open(appParams, dbaParams *sqldb.ConnParams, schemaOverrid
tableName,
row[1].String(), // table_type
row[3].String(), // table_comment
- si.cachePool,
)
if err != nil {
si.recordSchemaError(err, tableName)
@@ -225,10 +201,6 @@ func (si *SchemaInfo) Open(appParams, dbaParams *sqldb.ConnParams, schemaOverrid
si.mu.Lock()
defer si.mu.Unlock()
si.tables = tables
- if schemaOverrides != nil {
- si.overrides = schemaOverrides
- si.override()
- }
si.lastChange = curTime
}()
// Clear is not really needed. Doing it for good measure.
@@ -244,49 +216,6 @@ func (si *SchemaInfo) recordSchemaError(err error, tableName string) {
si.queryServiceStats.InternalErrors.Add("Schema", 1)
}
-// override should be called with a lock on mu held.
-func (si *SchemaInfo) override() {
- for _, override := range si.overrides {
- table, ok := si.tables[override.Name]
- if !ok {
- log.Warningf("Table not found for override: %v", override)
- continue
- }
- if override.PKColumns != nil {
- if err := table.SetPK(override.PKColumns); err != nil {
- log.Warningf("%v: %v", err, override)
- continue
- }
- }
- if si.cachePool.IsClosed() || override.Cache == nil {
- continue
- }
- switch override.Cache.Type {
- case "RW":
- table.Type = schema.CacheRW
- table.Cache = NewRowCache(table, si.cachePool)
- case "W":
- table.Type = schema.CacheW
- if override.Cache.Table == "" {
- log.Warningf("Incomplete cache specs: %v", override)
- continue
- }
- totable, ok := si.tables[override.Cache.Table]
- if !ok {
- log.Warningf("Table not found: %v", override)
- continue
- }
- if totable.Cache == nil {
- log.Warningf("Table has no cache: %v", override)
- continue
- }
- table.Cache = totable.Cache
- default:
- log.Warningf("Ignoring cache override: %v", override)
- }
- }
-}
-
// Close shuts down SchemaInfo. It can be re-opened after Close.
func (si *SchemaInfo) Close() {
si.ticks.Stop()
@@ -296,7 +225,6 @@ func (si *SchemaInfo) Close() {
si.mu.Lock()
defer si.mu.Unlock()
si.tables = nil
- si.overrides = nil
}
// Reload reloads the schema info from the db. Any tables that have changed
@@ -391,7 +319,6 @@ func (si *SchemaInfo) CreateOrUpdateTable(ctx context.Context, tableName string)
tableName,
row[1].String(), // table_type
row[3].String(), // table_comment
- si.cachePool,
)
if err != nil {
si.recordSchemaError(err, tableName)
@@ -413,23 +340,11 @@ func (si *SchemaInfo) CreateOrUpdateTable(ctx context.Context, tableName string)
si.tables[tableName] = tableInfo
switch tableInfo.Type {
- case schema.CacheNone:
+ case schema.NoType:
log.Infof("Initialized table: %s", tableName)
- case schema.CacheRW:
- log.Infof("Initialized cached table: %s, prefix: %s", tableName, tableInfo.Cache.prefix)
- case schema.CacheW:
- log.Infof("Initialized write-only cached table: %s, prefix: %s", tableName, tableInfo.Cache.prefix)
case schema.Sequence:
log.Infof("Initialized sequence: %s", tableName)
}
-
- // If the table has an override, re-apply all overrides.
- for _, o := range si.overrides {
- if o.Name == tableName {
- si.override()
- return
- }
- }
}
// DropTable must be called if a table was dropped.
@@ -590,34 +505,6 @@ func (si *SchemaInfo) ReloadTime() time.Duration {
return si.reloadTime
}
-func (si *SchemaInfo) getRowcacheStats() map[string]int64 {
- si.mu.Lock()
- defer si.mu.Unlock()
- tstats := make(map[string]int64)
- for k, v := range si.tables {
- if v.IsCached() {
- hits, absent, misses, _ := v.Stats()
- tstats[k+".Hits"] = hits
- tstats[k+".Absent"] = absent
- tstats[k+".Misses"] = misses
- }
- }
- return tstats
-}
-
-func (si *SchemaInfo) getRowcacheInvalidations() map[string]int64 {
- si.mu.Lock()
- defer si.mu.Unlock()
- tstats := make(map[string]int64)
- for k, v := range si.tables {
- if v.IsCached() {
- _, _, _, invalidations := v.Stats()
- tstats[k] = invalidations
- }
- }
- return tstats
-}
-
func (si *SchemaInfo) getTableRows() map[string]int64 {
si.mu.Lock()
defer si.mu.Unlock()
@@ -728,8 +615,6 @@ func (si *SchemaInfo) ServeHTTP(response http.ResponseWriter, request *http.Requ
si.handleHTTPQueryPlans(response, request)
} else if ep, ok := si.endpoints[debugQueryStatsKey]; ok && request.URL.Path == ep {
si.handleHTTPQueryStats(response, request)
- } else if ep, ok := si.endpoints[debugTableStatsKey]; ok && request.URL.Path == ep {
- si.handleHTTPTableStats(response, request)
} else if ep, ok := si.endpoints[debugSchemaKey]; ok && request.URL.Path == ep {
si.handleHTTPSchema(response, request)
} else if ep, ok := si.endpoints[debugQueryRulesKey]; ok && request.URL.Path == ep {
@@ -777,32 +662,6 @@ func (si *SchemaInfo) handleHTTPQueryStats(response http.ResponseWriter, request
}
}
-func (si *SchemaInfo) handleHTTPTableStats(response http.ResponseWriter, request *http.Request) {
- response.Header().Set("Content-Type", "application/json; charset=utf-8")
- tstats := make(map[string]struct{ hits, absent, misses, invalidations int64 })
- var temp, totals struct{ hits, absent, misses, invalidations int64 }
- func() {
- si.mu.Lock()
- defer si.mu.Unlock()
- for k, v := range si.tables {
- if v.IsCached() {
- temp.hits, temp.absent, temp.misses, temp.invalidations = v.Stats()
- tstats[k] = temp
- totals.hits += temp.hits
- totals.absent += temp.absent
- totals.misses += temp.misses
- totals.invalidations += temp.invalidations
- }
- }
- }()
- response.Write([]byte("{\n"))
- for k, v := range tstats {
- fmt.Fprintf(response, "\"%s\": {\"Hits\": %v, \"Absent\": %v, \"Misses\": %v, \"Invalidations\": %v},\n", k, v.hits, v.absent, v.misses, v.invalidations)
- }
- fmt.Fprintf(response, "\"Totals\": {\"Hits\": %v, \"Absent\": %v, \"Misses\": %v, \"Invalidations\": %v}\n", totals.hits, totals.absent, totals.misses, totals.invalidations)
- response.Write([]byte("}\n"))
-}
-
func (si *SchemaInfo) handleHTTPSchema(response http.ResponseWriter, request *http.Request) {
response.Header().Set("Content-Type", "application/json; charset=utf-8")
tables := si.GetSchema()
diff --git a/go/vt/tabletserver/schema_info_test.go b/go/vt/tabletserver/schema_info_test.go
index bd944f311ef..97e463e3116 100644
--- a/go/vt/tabletserver/schema_info_test.go
+++ b/go/vt/tabletserver/schema_info_test.go
@@ -8,7 +8,6 @@ import (
"expvar"
"fmt"
"math"
- "math/rand"
"net/http"
"net/http/httptest"
"testing"
@@ -18,8 +17,6 @@ import (
"github.com/youtube/vitess/go/sqldb"
"github.com/youtube/vitess/go/sqltypes"
- "github.com/youtube/vitess/go/vt/schema"
- "github.com/youtube/vitess/go/vt/tabletserver/fakecacheservice"
"github.com/youtube/vitess/go/vt/vttest/fakesqldb"
querypb "github.com/youtube/vitess/go/vt/proto/query"
@@ -27,7 +24,6 @@ import (
)
func TestSchemaInfoStrictMode(t *testing.T) {
- fakecacheservice.Register()
db := fakesqldb.Register()
for query, result := range getSchemaInfoBaseTestQueries() {
db.AddQuery(query, result)
@@ -36,19 +32,16 @@ func TestSchemaInfoStrictMode(t *testing.T) {
t.Log(schemaInfo)
appParams := sqldb.ConnParams{Engine: db.Name}
dbaParams := sqldb.ConnParams{Engine: db.Name}
- schemaInfo.cachePool.Open()
- defer schemaInfo.cachePool.Close()
defer handleAndVerifyTabletError(
t,
"schema info Open should fail because of underlying "+
"connection cannot verify strict mode",
vtrpcpb.ErrorCode_INTERNAL_ERROR,
)
- schemaInfo.Open(&appParams, &dbaParams, []SchemaOverride{}, true)
+ schemaInfo.Open(&appParams, &dbaParams, true)
}
func TestSchemaInfoOpenFailedDueToMissMySQLTime(t *testing.T) {
- fakecacheservice.Register()
db := fakesqldb.Register()
db.AddQuery("select unix_timestamp()", &sqltypes.Result{
// make this query fail
@@ -60,18 +53,15 @@ func TestSchemaInfoOpenFailedDueToMissMySQLTime(t *testing.T) {
schemaInfo := newTestSchemaInfo(10, 1*time.Second, 1*time.Second, false)
appParams := sqldb.ConnParams{Engine: db.Name}
dbaParams := sqldb.ConnParams{Engine: db.Name}
- schemaInfo.cachePool.Open()
- defer schemaInfo.cachePool.Close()
defer handleAndVerifyTabletError(
t,
"schema info Open should fail because of it could not get MySQL time",
vtrpcpb.ErrorCode_UNKNOWN_ERROR,
)
- schemaInfo.Open(&appParams, &dbaParams, []SchemaOverride{}, false)
+ schemaInfo.Open(&appParams, &dbaParams, false)
}
func TestSchemaInfoOpenFailedDueToIncorrectMysqlRowNum(t *testing.T) {
- fakecacheservice.Register()
db := fakesqldb.Register()
db.AddQuery("select unix_timestamp()", &sqltypes.Result{
RowsAffected: 1,
@@ -83,18 +73,15 @@ func TestSchemaInfoOpenFailedDueToIncorrectMysqlRowNum(t *testing.T) {
schemaInfo := newTestSchemaInfo(10, 1*time.Second, 1*time.Second, false)
appParams := sqldb.ConnParams{Engine: db.Name}
dbaParams := sqldb.ConnParams{Engine: db.Name}
- schemaInfo.cachePool.Open()
- defer schemaInfo.cachePool.Close()
defer handleAndVerifyTabletError(
t,
"schema info Open should fail because of incorrect MySQL row number",
vtrpcpb.ErrorCode_UNKNOWN_ERROR,
)
- schemaInfo.Open(&appParams, &dbaParams, []SchemaOverride{}, false)
+ schemaInfo.Open(&appParams, &dbaParams, false)
}
func TestSchemaInfoOpenFailedDueToInvalidTimeFormat(t *testing.T) {
- fakecacheservice.Register()
db := fakesqldb.Register()
db.AddQuery("select unix_timestamp()", &sqltypes.Result{
RowsAffected: 1,
@@ -106,18 +93,15 @@ func TestSchemaInfoOpenFailedDueToInvalidTimeFormat(t *testing.T) {
schemaInfo := newTestSchemaInfo(10, 1*time.Second, 1*time.Second, false)
appParams := sqldb.ConnParams{Engine: db.Name}
dbaParams := sqldb.ConnParams{Engine: db.Name}
- schemaInfo.cachePool.Open()
- defer schemaInfo.cachePool.Close()
defer handleAndVerifyTabletError(
t,
"schema info Open should fail because it could not get MySQL time",
vtrpcpb.ErrorCode_UNKNOWN_ERROR,
)
- schemaInfo.Open(&appParams, &dbaParams, []SchemaOverride{}, false)
+ schemaInfo.Open(&appParams, &dbaParams, false)
}
func TestSchemaInfoOpenFailedDueToExecErr(t *testing.T) {
- fakecacheservice.Register()
db := fakesqldb.Register()
for query, result := range getSchemaInfoBaseTestQueries() {
db.AddQuery(query, result)
@@ -129,18 +113,15 @@ func TestSchemaInfoOpenFailedDueToExecErr(t *testing.T) {
schemaInfo := newTestSchemaInfo(10, 1*time.Second, 1*time.Second, false)
appParams := sqldb.ConnParams{Engine: db.Name}
dbaParams := sqldb.ConnParams{Engine: db.Name}
- schemaInfo.cachePool.Open()
- defer schemaInfo.cachePool.Close()
defer handleAndVerifyTabletError(
t,
"schema info Open should fail because conn.Exec failed",
vtrpcpb.ErrorCode_UNKNOWN_ERROR,
)
- schemaInfo.Open(&appParams, &dbaParams, []SchemaOverride{}, false)
+ schemaInfo.Open(&appParams, &dbaParams, false)
}
func TestSchemaInfoOpenFailedDueToTableInfoErr(t *testing.T) {
- fakecacheservice.Register()
db := fakesqldb.Register()
for query, result := range getSchemaInfoBaseTestQueries() {
db.AddQuery(query, result)
@@ -158,46 +139,15 @@ func TestSchemaInfoOpenFailedDueToTableInfoErr(t *testing.T) {
schemaInfo := newTestSchemaInfo(10, 1*time.Second, 1*time.Second, false)
appParams := sqldb.ConnParams{Engine: db.Name}
dbaParams := sqldb.ConnParams{Engine: db.Name}
- schemaInfo.cachePool.Open()
- defer schemaInfo.cachePool.Close()
defer handleAndVerifyTabletError(
t,
"schema info Open should fail because NewTableInfo failed",
vtrpcpb.ErrorCode_INTERNAL_ERROR,
)
- schemaInfo.Open(&appParams, &dbaParams, []SchemaOverride{}, false)
-}
-
-func TestSchemaInfoOpenWithSchemaOverride(t *testing.T) {
- fakecacheservice.Register()
- db := fakesqldb.Register()
- for query, result := range getSchemaInfoTestSupportedQueries() {
- db.AddQuery(query, result)
- }
- schemaInfo := newTestSchemaInfo(10, 10*time.Second, 10*time.Second, false)
- appParams := sqldb.ConnParams{Engine: db.Name}
- dbaParams := sqldb.ConnParams{Engine: db.Name}
- schemaInfo.cachePool.Open()
- defer schemaInfo.cachePool.Close()
- schemaOverrides := getSchemaInfoTestSchemaOverride()
- // test cache type RW
- schemaInfo.Open(&appParams, &dbaParams, schemaOverrides, true)
- testTableInfo := schemaInfo.GetTable("test_table_01")
- if testTableInfo.Table.Type != schema.CacheRW {
- t.Fatalf("test_table_01's cache type should be RW")
- }
- schemaInfo.Close()
- // test cache type W
- schemaInfo.Open(&appParams, &dbaParams, schemaOverrides, true)
- testTableInfo = schemaInfo.GetTable("test_table_02")
- if testTableInfo.Table.Type != schema.CacheW {
- t.Fatalf("test_table_02's cache type should be W")
- }
- schemaInfo.Close()
+ schemaInfo.Open(&appParams, &dbaParams, false)
}
func TestSchemaInfoReload(t *testing.T) {
- fakecacheservice.Register()
db := fakesqldb.Register()
for query, result := range getSchemaInfoTestSupportedQueries() {
db.AddQuery(query, result)
@@ -206,10 +156,7 @@ func TestSchemaInfoReload(t *testing.T) {
schemaInfo := newTestSchemaInfo(10, 10*time.Second, idleTimeout, false)
appParams := sqldb.ConnParams{Engine: db.Name}
dbaParams := sqldb.ConnParams{Engine: db.Name}
- schemaInfo.cachePool.Open()
- defer schemaInfo.cachePool.Close()
- // test cache type RW
- schemaInfo.Open(&appParams, &dbaParams, nil, true)
+ schemaInfo.Open(&appParams, &dbaParams, true)
defer schemaInfo.Close()
// this new table does not exist
newTable := "test_table_04"
@@ -278,7 +225,6 @@ func TestSchemaInfoReload(t *testing.T) {
}
func TestSchemaInfoCreateOrUpdateTableFailedDuetoExecErr(t *testing.T) {
- fakecacheservice.Register()
db := fakesqldb.Register()
for query, result := range getSchemaInfoTestSupportedQueries() {
db.AddQuery(query, result)
@@ -294,9 +240,7 @@ func TestSchemaInfoCreateOrUpdateTableFailedDuetoExecErr(t *testing.T) {
schemaInfo := newTestSchemaInfo(10, 1*time.Second, 1*time.Second, true)
appParams := sqldb.ConnParams{Engine: db.Name}
dbaParams := sqldb.ConnParams{Engine: db.Name}
- schemaInfo.cachePool.Open()
- defer schemaInfo.cachePool.Close()
- schemaInfo.Open(&appParams, &dbaParams, getSchemaInfoTestSchemaOverride(), false)
+ schemaInfo.Open(&appParams, &dbaParams, false)
defer schemaInfo.Close()
originalSchemaErrorCount := schemaInfo.queryServiceStats.InternalErrors.Counts()["Schema"]
// should silently fail: no errors returned, but increment a counter
@@ -310,7 +254,6 @@ func TestSchemaInfoCreateOrUpdateTableFailedDuetoExecErr(t *testing.T) {
}
func TestSchemaInfoCreateOrUpdateTable(t *testing.T) {
- fakecacheservice.Register()
db := fakesqldb.Register()
for query, result := range getSchemaInfoTestSupportedQueries() {
db.AddQuery(query, result)
@@ -326,15 +269,12 @@ func TestSchemaInfoCreateOrUpdateTable(t *testing.T) {
schemaInfo := newTestSchemaInfo(10, 1*time.Second, 1*time.Second, false)
appParams := sqldb.ConnParams{Engine: db.Name}
dbaParams := sqldb.ConnParams{Engine: db.Name}
- schemaInfo.cachePool.Open()
- defer schemaInfo.cachePool.Close()
- schemaInfo.Open(&appParams, &dbaParams, getSchemaInfoTestSchemaOverride(), false)
+ schemaInfo.Open(&appParams, &dbaParams, false)
schemaInfo.CreateOrUpdateTable(context.Background(), "test_table_01")
schemaInfo.Close()
}
func TestSchemaInfoDropTable(t *testing.T) {
- fakecacheservice.Register()
db := fakesqldb.Register()
for query, result := range getSchemaInfoTestSupportedQueries() {
db.AddQuery(query, result)
@@ -350,9 +290,7 @@ func TestSchemaInfoDropTable(t *testing.T) {
schemaInfo := newTestSchemaInfo(10, 1*time.Second, 1*time.Second, false)
appParams := sqldb.ConnParams{Engine: db.Name}
dbaParams := sqldb.ConnParams{Engine: db.Name}
- schemaInfo.cachePool.Open()
- defer schemaInfo.cachePool.Close()
- schemaInfo.Open(&appParams, &dbaParams, getSchemaInfoTestSchemaOverride(), false)
+ schemaInfo.Open(&appParams, &dbaParams, false)
tableInfo := schemaInfo.GetTable(existingTable)
if tableInfo == nil {
t.Fatalf("table: %s should exist", existingTable)
@@ -366,7 +304,6 @@ func TestSchemaInfoDropTable(t *testing.T) {
}
func TestSchemaInfoGetPlanPanicDuetoEmptyQuery(t *testing.T) {
- fakecacheservice.Register()
db := fakesqldb.Register()
for query, result := range getSchemaInfoTestSupportedQueries() {
db.AddQuery(query, result)
@@ -374,11 +311,7 @@ func TestSchemaInfoGetPlanPanicDuetoEmptyQuery(t *testing.T) {
schemaInfo := newTestSchemaInfo(10, 10*time.Second, 10*time.Second, false)
appParams := sqldb.ConnParams{Engine: db.Name}
dbaParams := sqldb.ConnParams{Engine: db.Name}
- schemaInfo.cachePool.Open()
- defer schemaInfo.cachePool.Close()
- schemaOverrides := getSchemaInfoTestSchemaOverride()
- // test cache type RW
- schemaInfo.Open(&appParams, &dbaParams, schemaOverrides, true)
+ schemaInfo.Open(&appParams, &dbaParams, true)
defer schemaInfo.Close()
ctx := context.Background()
@@ -392,7 +325,6 @@ func TestSchemaInfoGetPlanPanicDuetoEmptyQuery(t *testing.T) {
}
func TestSchemaInfoQueryCacheFailDueToInvalidCacheSize(t *testing.T) {
- fakecacheservice.Register()
db := fakesqldb.Register()
for query, result := range getSchemaInfoTestSupportedQueries() {
db.AddQuery(query, result)
@@ -400,11 +332,7 @@ func TestSchemaInfoQueryCacheFailDueToInvalidCacheSize(t *testing.T) {
schemaInfo := newTestSchemaInfo(10, 10*time.Second, 10*time.Second, false)
appParams := sqldb.ConnParams{Engine: db.Name}
dbaParams := sqldb.ConnParams{Engine: db.Name}
- schemaInfo.cachePool.Open()
- defer schemaInfo.cachePool.Close()
- schemaOverrides := getSchemaInfoTestSchemaOverride()
- // test cache type RW
- schemaInfo.Open(&appParams, &dbaParams, schemaOverrides, true)
+ schemaInfo.Open(&appParams, &dbaParams, true)
defer schemaInfo.Close()
defer handleAndVerifyTabletError(
t,
@@ -415,7 +343,6 @@ func TestSchemaInfoQueryCacheFailDueToInvalidCacheSize(t *testing.T) {
}
func TestSchemaInfoQueryCache(t *testing.T) {
- fakecacheservice.Register()
db := fakesqldb.Register()
for query, result := range getSchemaInfoTestSupportedQueries() {
db.AddQuery(query, result)
@@ -429,11 +356,7 @@ func TestSchemaInfoQueryCache(t *testing.T) {
schemaInfo := newTestSchemaInfo(10, 10*time.Second, 10*time.Second, true)
appParams := sqldb.ConnParams{Engine: db.Name}
dbaParams := sqldb.ConnParams{Engine: db.Name}
- schemaInfo.cachePool.Open()
- defer schemaInfo.cachePool.Close()
- schemaOverrides := getSchemaInfoTestSchemaOverride()
- // test cache type RW
- schemaInfo.Open(&appParams, &dbaParams, schemaOverrides, true)
+ schemaInfo.Open(&appParams, &dbaParams, true)
defer schemaInfo.Close()
ctx := context.Background()
@@ -454,7 +377,6 @@ func TestSchemaInfoQueryCache(t *testing.T) {
}
func TestSchemaInfoExportVars(t *testing.T) {
- fakecacheservice.Register()
db := fakesqldb.Register()
for query, result := range getSchemaInfoTestSupportedQueries() {
db.AddQuery(query, result)
@@ -462,9 +384,7 @@ func TestSchemaInfoExportVars(t *testing.T) {
schemaInfo := newTestSchemaInfo(10, 1*time.Second, 1*time.Second, true)
appParams := sqldb.ConnParams{Engine: db.Name}
dbaParams := sqldb.ConnParams{Engine: db.Name}
- schemaInfo.cachePool.Open()
- defer schemaInfo.cachePool.Close()
- schemaInfo.Open(&appParams, &dbaParams, []SchemaOverride{}, true)
+ schemaInfo.Open(&appParams, &dbaParams, true)
defer schemaInfo.Close()
expvar.Do(func(kv expvar.KeyValue) {
_ = kv.Value.String()
@@ -472,7 +392,6 @@ func TestSchemaInfoExportVars(t *testing.T) {
}
func TestUpdatedMysqlStats(t *testing.T) {
- fakecacheservice.Register()
db := fakesqldb.Register()
for query, result := range getSchemaInfoTestSupportedQueries() {
db.AddQuery(query, result)
@@ -481,9 +400,7 @@ func TestUpdatedMysqlStats(t *testing.T) {
schemaInfo := newTestSchemaInfo(10, 10*time.Second, idleTimeout, false)
appParams := sqldb.ConnParams{Engine: db.Name}
dbaParams := sqldb.ConnParams{Engine: db.Name}
- schemaInfo.cachePool.Open()
- defer schemaInfo.cachePool.Close()
- schemaInfo.Open(&appParams, &dbaParams, nil, true)
+ schemaInfo.Open(&appParams, &dbaParams, true)
defer schemaInfo.Close()
// Add new table
tableName := "mysql_stats_test_table"
@@ -547,7 +464,6 @@ func TestUpdatedMysqlStats(t *testing.T) {
}
func TestSchemaInfoStatsURL(t *testing.T) {
- fakecacheservice.Register()
db := fakesqldb.Register()
for query, result := range getSchemaInfoTestSupportedQueries() {
db.AddQuery(query, result)
@@ -557,9 +473,7 @@ func TestSchemaInfoStatsURL(t *testing.T) {
schemaInfo := newTestSchemaInfo(10, 1*time.Second, 1*time.Second, false)
appParams := sqldb.ConnParams{Engine: db.Name}
dbaParams := sqldb.ConnParams{Engine: db.Name}
- schemaInfo.cachePool.Open()
- defer schemaInfo.cachePool.Close()
- schemaInfo.Open(&appParams, &dbaParams, []SchemaOverride{}, true)
+ schemaInfo.Open(&appParams, &dbaParams, true)
defer schemaInfo.Close()
// warm up cache
ctx := context.Background()
@@ -574,10 +488,6 @@ func TestSchemaInfoStatsURL(t *testing.T) {
response = httptest.NewRecorder()
schemaInfo.ServeHTTP(response, request)
- request, _ = http.NewRequest("GET", schemaInfo.endpoints[debugTableStatsKey], nil)
- response = httptest.NewRecorder()
- schemaInfo.ServeHTTP(response, request)
-
request, _ = http.NewRequest("GET", schemaInfo.endpoints[debugSchemaKey], nil)
response = httptest.NewRecorder()
schemaInfo.ServeHTTP(response, request)
@@ -587,24 +497,6 @@ func TestSchemaInfoStatsURL(t *testing.T) {
schemaInfo.ServeHTTP(response, request)
}
-func newTestSchemaInfoCachePool(enablePublishStats bool, queryServiceStats *QueryServiceStats) *CachePool {
- rowCacheConfig := RowCacheConfig{
- Binary: "ls",
- Connections: 100,
- }
- randID := rand.Int63()
- name := fmt.Sprintf("TestCachePool-%d-", randID)
- statsURL := fmt.Sprintf("/debug/cache-%d", randID)
- return NewCachePool(
- name,
- rowCacheConfig,
- 1*time.Second,
- statsURL,
- enablePublishStats,
- queryServiceStats,
- )
-}
-
func getSchemaInfoBaseTestQueries() map[string]*sqltypes.Result {
return map[string]*sqltypes.Result{
// queries for schema info
@@ -617,105 +509,6 @@ func getSchemaInfoBaseTestQueries() map[string]*sqltypes.Result {
}
}
-func getSchemaInfoTestSchemaOverride() []SchemaOverride {
- return []SchemaOverride{
- {
- Name: "test_table_01",
- PKColumns: []string{"pk"},
- Cache: &struct {
- Type string
- Table string
- }{
- Type: "RW",
- Table: "test_table_01",
- },
- },
- // this should be ignored by schema info due to unknown table
- {
- Name: "unknown_table",
- PKColumns: []string{"column_01"},
- Cache: &struct {
- Type string
- Table string
- }{
- Type: "RW",
- Table: "test_table",
- },
- },
- // this should be ignored by schema info due to invalid primary key column
- {
- Name: "test_table_01",
- PKColumns: []string{"unknown_column"},
- Cache: &struct {
- Type string
- Table string
- }{
- Type: "RW",
- Table: "test_table",
- },
- },
- {
- Name: "test_table_02",
- PKColumns: []string{"pk"},
- Cache: &struct {
- Type string
- Table string
- }{
- Type: "W",
- Table: "test_table_02",
- },
- },
- {
- Name: "test_table_02",
- PKColumns: []string{"pk"},
- Cache: &struct {
- Type string
- Table string
- }{
- Type: "W",
- // table is missing
- Table: "",
- },
- },
- {
- Name: "test_table_02",
- PKColumns: []string{"pk"},
- Cache: &struct {
- Type string
- Table string
- }{
- Type: "W",
- // table does not exist
- Table: "unknown_table",
- },
- },
- {
- Name: "test_table_02",
- PKColumns: []string{"pk"},
- Cache: &struct {
- Type string
- Table string
- }{
- Type: "W",
- // table does not have cache
- Table: "test_table_03",
- },
- },
- {
- Name: "test_table_02",
- PKColumns: []string{"pk"},
- Cache: &struct {
- Type string
- Table string
- }{
- // cache type unknown
- Type: "UNKNOWN",
- Table: "test_table_02",
- },
- },
- }
-}
-
func createTestTableBaseShowTable(tableName string) []sqltypes.Value {
return []sqltypes.Value{
sqltypes.MakeString([]byte(tableName)),
diff --git a/go/vt/tabletserver/schemaz.go b/go/vt/tabletserver/schemaz.go
index 5d0828feb8e..1374d4ec24c 100644
--- a/go/vt/tabletserver/schemaz.go
+++ b/go/vt/tabletserver/schemaz.go
@@ -78,7 +78,7 @@ func schemazHandler(tables []*schema.Table, w http.ResponseWriter, r *http.Reque
Type []string
Table *schema.Table
}{
- Type: []string{"none", "read-write", "write-only", "sequence"},
+ Type: schema.TypeNames,
}
for _, Value := range sorter.rows {
envelope.Table = Value
diff --git a/go/vt/tabletserver/schemaz_test.go b/go/vt/tabletserver/schemaz_test.go
index 25554ab431e..9a726c2dbf4 100644
--- a/go/vt/tabletserver/schemaz_test.go
+++ b/go/vt/tabletserver/schemaz_test.go
@@ -25,41 +25,24 @@ func TestSchamazHandler(t *testing.T) {
tableA.AddColumn("column1", sqltypes.Int64, sqltypes.MakeTrusted(sqltypes.Int32, []byte("0")), "auto_increment")
tableA.AddIndex("index1").AddColumn("index_column", 1000)
- tableA.Type = schema.CacheRW
+ tableA.Type = schema.NoType
tableB.AddColumn("column2", sqltypes.VarChar, sqltypes.MakeString([]byte("NULL")), "")
tableB.AddIndex("index2").AddColumn("index_column2", 200)
- tableB.Type = schema.CacheW
-
- tableC.AddColumn("column3", sqltypes.VarChar, sqltypes.MakeString([]byte("")), "")
- tableC.AddIndex("index3").AddColumn("index_column3", 500)
- tableC.Type = schema.CacheNone
+ tableB.Type = schema.Sequence
tables := []*schema.Table{
tableA, tableB, tableC,
}
schemazHandler(tables, resp, req)
body, _ := ioutil.ReadAll(resp.Body)
- tableCPattern := []string{
- `c | `,
- `column3: VARCHAR, ,
| `,
- `index3: \(index_column3,\), \(500,\)
| `,
- `none | `,
- }
- matched, err := regexp.Match(strings.Join(tableCPattern, `\s*`), body)
- if err != nil {
- t.Fatalf("schemaz page does not contain table C with error: %v", err)
- }
- if !matched {
- t.Fatalf("schemaz page does not contain table C")
- }
tableBPattern := []string{
`b | `,
`column2: VARCHAR, , NULL
| `,
`index2: \(index_column2,\), \(200,\)
| `,
- `write-only | `,
+ `sequence | `,
}
- matched, err = regexp.Match(strings.Join(tableBPattern, `\s*`), body)
+ matched, err := regexp.Match(strings.Join(tableBPattern, `\s*`), body)
if err != nil {
t.Fatalf("schemaz page does not contain table B with error: %v", err)
}
@@ -70,7 +53,7 @@ func TestSchamazHandler(t *testing.T) {
`a | `,
`column1: INT64, autoinc,
| `,
`index1: \(index_column,\), \(1000,\)
| `,
- `read-write | `,
+ `none | `,
}
matched, err = regexp.Match(strings.Join(tableAPattern, `\s*`), body)
if err != nil {
diff --git a/go/vt/tabletserver/table_info.go b/go/vt/tabletserver/table_info.go
index b958d1ac06b..83801bbe233 100644
--- a/go/vt/tabletserver/table_info.go
+++ b/go/vt/tabletserver/table_info.go
@@ -11,8 +11,6 @@ import (
"sync"
log "github.com/golang/glog"
- "github.com/youtube/vitess/go/sqltypes"
- "github.com/youtube/vitess/go/sync2"
querypb "github.com/youtube/vitess/go/vt/proto/query"
"github.com/youtube/vitess/go/vt/schema"
"golang.org/x/net/context"
@@ -22,7 +20,6 @@ import (
// It's a superset of schema.Table.
type TableInfo struct {
*schema.Table
- Cache *RowCache
// Seq must be locked before accessing the sequence vars.
// If CurVal==LastVal, we have to cache new values.
@@ -30,22 +27,17 @@ type TableInfo struct {
NextVal int64
Increment int64
LastVal int64
-
- // rowcache stats updated by query_executor.go and query_engine.go.
- hits, absent, misses, invalidations sync2.AtomicInt64
}
// NewTableInfo creates a new TableInfo.
-func NewTableInfo(conn *DBConn, tableName string, tableType string, comment string, cachePool *CachePool) (ti *TableInfo, err error) {
+func NewTableInfo(conn *DBConn, tableName string, tableType string, comment string) (ti *TableInfo, err error) {
ti, err = loadTableInfo(conn, tableName)
if err != nil {
return nil, err
}
if strings.Contains(comment, "vitess_sequence") {
ti.Type = schema.Sequence
- return ti, nil
}
- ti.initRowCache(conn, tableType, comment, cachePool)
return ti, nil
}
@@ -163,48 +155,3 @@ func (ti *TableInfo) fetchIndexes(conn *DBConn) error {
}
return nil
}
-
-func (ti *TableInfo) initRowCache(conn *DBConn, tableType string, comment string, cachePool *CachePool) {
- if cachePool.IsClosed() {
- return
- }
-
- if strings.Contains(comment, "vitess_nocache") {
- log.Infof("%s commented as vitess_nocache. Will not be cached.", ti.Name)
- return
- }
-
- if tableType == "VIEW" {
- log.Infof("%s is a view. Will not be cached.", ti.Name)
- return
- }
-
- if ti.PKColumns == nil {
- log.Infof("Table %s has no primary key. Will not be cached.", ti.Name)
- return
- }
- for _, col := range ti.PKColumns {
- if sqltypes.IsIntegral(ti.Columns[col].Type) || ti.Columns[col].Type == sqltypes.VarBinary {
- continue
- }
- log.Infof("Table %s pk has unsupported column types. Will not be cached.", ti.Name)
- return
- }
-
- ti.Type = schema.CacheRW
- ti.Cache = NewRowCache(ti, cachePool)
-}
-
-// StatsJSON returns a JSON representation of the TableInfo stats.
-func (ti *TableInfo) StatsJSON() string {
- if ti.Cache == nil {
- return fmt.Sprintf("null")
- }
- h, a, m, i := ti.Stats()
- return fmt.Sprintf("{\"Hits\": %v, \"Absent\": %v, \"Misses\": %v, \"Invalidations\": %v}", h, a, m, i)
-}
-
-// Stats returns the stats for TableInfo.
-func (ti *TableInfo) Stats() (hits, absent, misses, invalidations int64) {
- return ti.hits.Get(), ti.absent.Get(), ti.misses.Get(), ti.invalidations.Get()
-}
diff --git a/go/vt/tabletserver/table_info_test.go b/go/vt/tabletserver/table_info_test.go
index 3c887ff073b..73cab51bcea 100644
--- a/go/vt/tabletserver/table_info_test.go
+++ b/go/vt/tabletserver/table_info_test.go
@@ -6,8 +6,6 @@ package tabletserver
import (
"errors"
- "fmt"
- "math/rand"
"reflect"
"testing"
"time"
@@ -16,185 +14,30 @@ import (
"github.com/youtube/vitess/go/sqltypes"
querypb "github.com/youtube/vitess/go/vt/proto/query"
"github.com/youtube/vitess/go/vt/schema"
- "github.com/youtube/vitess/go/vt/tabletserver/fakecacheservice"
"github.com/youtube/vitess/go/vt/vttest/fakesqldb"
"golang.org/x/net/context"
)
var errRejected = errors.New("rejected")
-func TestTableInfoNew(t *testing.T) {
- fakecacheservice.Register()
- db := fakesqldb.Register()
- for query, result := range getTestTableInfoQueries() {
- db.AddQuery(query, result)
- }
- cachePool := newTestTableInfoCachePool()
- cachePool.Open()
- defer cachePool.Close()
- tableInfo, err := newTestTableInfo(cachePool, "USER_TABLE", "test table", db)
- if err != nil {
- t.Fatalf("failed to create a test table info")
- }
- if tableInfo.Cache == nil {
- t.Fatalf("rowcache should be enabled")
- }
- stats := tableInfo.StatsJSON()
- if stats == "" || stats == "null" {
- t.Fatalf("rowcache is enabled, stats should not be empty or null")
- }
-}
-
func TestTableInfoFailBecauseUnableToRetrieveTableIndex(t *testing.T) {
- fakecacheservice.Register()
db := fakesqldb.Register()
for query, result := range getTestTableInfoQueries() {
db.AddQuery(query, result)
}
db.AddRejectedQuery("show index from `test_table`", errRejected)
- cachePool := newTestTableInfoCachePool()
- cachePool.Open()
- defer cachePool.Close()
- _, err := newTestTableInfo(cachePool, "USER_TABLE", "test table", db)
+ _, err := newTestTableInfo("USER_TABLE", "test table", db)
if err == nil {
t.Fatalf("table info creation should fail because it is unable to get test_table index")
}
}
-func TestTableInfoWithoutRowCacheViaComment(t *testing.T) {
- fakecacheservice.Register()
- db := fakesqldb.Register()
- for query, result := range getTestTableInfoQueries() {
- db.AddQuery(query, result)
- }
- cachePool := newTestTableInfoCachePool()
- cachePool.Open()
- defer cachePool.Close()
- tableInfo, err := newTestTableInfo(cachePool, "USER_TABLE", "vitess_nocache", db)
- if err != nil {
- t.Fatalf("failed to create a test table info")
- }
- if tableInfo.Cache != nil {
- t.Fatalf("table info's rowcache should be disabled")
- }
- if tableInfo.StatsJSON() != "null" {
- t.Fatalf("rowcache is disabled, stats should be null")
- }
-}
-
-func TestTableInfoWithoutRowCacheViaTableType(t *testing.T) {
- fakecacheservice.Register()
- db := fakesqldb.Register()
- for query, result := range getTestTableInfoQueries() {
- db.AddQuery(query, result)
- }
- cachePool := newTestTableInfoCachePool()
- cachePool.Open()
- defer cachePool.Close()
- tableInfo, err := newTestTableInfo(cachePool, "VIEW", "test table", db)
- if err != nil {
- t.Fatalf("failed to create a test table info")
- }
- if tableInfo.Cache != nil {
- t.Fatalf("table info's rowcache should be disabled")
- }
-}
-
-func TestTableInfoWithoutRowCacheViaNoPKColumn(t *testing.T) {
- fakecacheservice.Register()
- db := fakesqldb.Register()
- db.AddQuery("show index from `test_table`", &sqltypes.Result{})
- db.AddQuery("select * from `test_table` where 1 != 1", &sqltypes.Result{
- Fields: []*querypb.Field{{
- Name: "pk",
- Type: sqltypes.Int32,
- }},
- })
- db.AddQuery("describe `test_table`", &sqltypes.Result{
- RowsAffected: 1,
- Rows: [][]sqltypes.Value{
- {
- sqltypes.MakeString([]byte("pk")),
- sqltypes.MakeString([]byte("int")),
- sqltypes.MakeString([]byte{}),
- sqltypes.MakeString([]byte{}),
- sqltypes.MakeString([]byte("1")),
- sqltypes.MakeString([]byte{}),
- },
- },
- })
-
- cachePool := newTestTableInfoCachePool()
- cachePool.Open()
- defer cachePool.Close()
- tableInfo, err := newTestTableInfo(cachePool, "USER_TABLE", "test table", db)
- if err != nil {
- t.Fatalf("failed to create a test table info")
- }
- if tableInfo.Cache != nil {
- t.Fatalf("table info's rowcache should be disabled")
- }
-}
-
-func TestTableInfoWithoutRowCacheViaUnknownPKColumnType(t *testing.T) {
- fakecacheservice.Register()
- db := fakesqldb.Register()
- db.AddQuery("show index from `test_table`", &sqltypes.Result{
- RowsAffected: 1,
- Rows: [][]sqltypes.Value{
- {
- sqltypes.MakeString([]byte{}),
- sqltypes.MakeString([]byte{}),
- sqltypes.MakeString([]byte("PRIMARY")),
- sqltypes.MakeString([]byte{}),
- sqltypes.MakeString([]byte("pk")),
- sqltypes.MakeString([]byte{}),
- sqltypes.MakeString([]byte("300")),
- },
- },
- })
- db.AddQuery("select * from `test_table` where 1 != 1", &sqltypes.Result{
- Fields: []*querypb.Field{{
- Name: "pk",
- Type: sqltypes.Decimal,
- }},
- })
- db.AddQuery("describe `test_table`", &sqltypes.Result{
- RowsAffected: 1,
- Rows: [][]sqltypes.Value{
- {
- sqltypes.MakeString([]byte("pk")),
- sqltypes.MakeString([]byte("decimal")),
- sqltypes.MakeString([]byte{}),
- sqltypes.MakeString([]byte{}),
- sqltypes.MakeString([]byte("1")),
- sqltypes.MakeString([]byte{}),
- },
- },
- })
-
- cachePool := newTestTableInfoCachePool()
- cachePool.Open()
- defer cachePool.Close()
- tableInfo, err := newTestTableInfo(cachePool, "USER_TABLE", "test table", db)
- if err != nil {
- t.Fatalf("failed to create a test table info")
- }
- if tableInfo.Cache != nil {
- t.Fatalf("table info's rowcache should be disabled")
- }
-}
-
func TestTableInfoReplacePKColumn(t *testing.T) {
- fakecacheservice.Register()
db := fakesqldb.Register()
for query, result := range getTestTableInfoQueries() {
db.AddQuery(query, result)
}
- cachePool := newTestTableInfoCachePool()
- cachePool.Open()
- defer cachePool.Close()
- tableInfo, err := newTestTableInfo(cachePool, "USER_TABLE", "test table", db)
+ tableInfo, err := newTestTableInfo("USER_TABLE", "test table", db)
if err != nil {
t.Fatalf("failed to create a table info")
}
@@ -211,7 +54,6 @@ func TestTableInfoReplacePKColumn(t *testing.T) {
}
func TestTableInfoSetPKColumn(t *testing.T) {
- fakecacheservice.Register()
db := fakesqldb.Register()
for query, result := range getTestTableInfoQueries() {
db.AddQuery(query, result)
@@ -230,10 +72,7 @@ func TestTableInfoSetPKColumn(t *testing.T) {
},
},
})
- cachePool := newTestTableInfoCachePool()
- cachePool.Open()
- defer cachePool.Close()
- tableInfo, err := newTestTableInfo(cachePool, "USER_TABLE", "test table", db)
+ tableInfo, err := newTestTableInfo("USER_TABLE", "test table", db)
if err != nil {
t.Fatalf("failed to create a table info")
}
@@ -250,7 +89,6 @@ func TestTableInfoSetPKColumn(t *testing.T) {
}
func TestTableInfoInvalidCardinalityInIndex(t *testing.T) {
- fakecacheservice.Register()
db := fakesqldb.Register()
for query, result := range getTestTableInfoQueries() {
db.AddQuery(query, result)
@@ -269,10 +107,7 @@ func TestTableInfoInvalidCardinalityInIndex(t *testing.T) {
},
},
})
- cachePool := newTestTableInfoCachePool()
- cachePool.Open()
- defer cachePool.Close()
- tableInfo, err := newTestTableInfo(cachePool, "USER_TABLE", "test table", db)
+ tableInfo, err := newTestTableInfo("USER_TABLE", "test table", db)
if err != nil {
t.Fatalf("failed to create a table info: %v", err)
}
@@ -286,7 +121,7 @@ func TestTableInfoSequence(t *testing.T) {
for query, result := range getTestTableInfoQueries() {
db.AddQuery(query, result)
}
- tableInfo, err := newTestTableInfo(nil, "USER_TABLE", "vitess_sequence", db)
+ tableInfo, err := newTestTableInfo("USER_TABLE", "vitess_sequence", db)
if err != nil {
t.Fatalf("failed to create a test table info")
}
@@ -304,7 +139,7 @@ func TestTableInfoSequence(t *testing.T) {
}
}
-func newTestTableInfo(cachePool *CachePool, tableType string, comment string, db *fakesqldb.DB) (*TableInfo, error) {
+func newTestTableInfo(tableType string, comment string, db *fakesqldb.DB) (*TableInfo, error) {
ctx := context.Background()
appParams := sqldb.ConnParams{Engine: db.Name}
dbaParams := sqldb.ConnParams{Engine: db.Name}
@@ -319,31 +154,13 @@ func newTestTableInfo(cachePool *CachePool, tableType string, comment string, db
defer conn.Recycle()
tableName := "test_table"
- tableInfo, err := NewTableInfo(conn, tableName, tableType, comment, cachePool)
+ tableInfo, err := NewTableInfo(conn, tableName, tableType, comment)
if err != nil {
return nil, err
}
return tableInfo, nil
}
-func newTestTableInfoCachePool() *CachePool {
- rowCacheConfig := RowCacheConfig{
- Binary: "ls",
- Connections: 100,
- }
- randID := rand.Int63()
- name := fmt.Sprintf("TestCachePool-TableInfo-%d-", randID)
- statsURL := fmt.Sprintf("/debug/tableinfo-cache-%d", randID)
- return NewCachePool(
- name,
- rowCacheConfig,
- 1*time.Second,
- statsURL,
- false,
- NewQueryServiceStats("", false),
- )
-}
-
func getTestTableInfoQueries() map[string]*sqltypes.Result {
return map[string]*sqltypes.Result{
"select * from `test_table` where 1 != 1": {
diff --git a/go/vt/tabletserver/tabletserver.go b/go/vt/tabletserver/tabletserver.go
index 96ed4338e84..24739c33904 100644
--- a/go/vt/tabletserver/tabletserver.go
+++ b/go/vt/tabletserver/tabletserver.go
@@ -98,14 +98,12 @@ type TabletServer struct {
// before starting the tabletserver. For backward compatibility,
// we temporarily allow them to be changed until the migration
// to the new API is complete.
- dbconfigs dbconfigs.DBConfigs
- schemaOverrides []SchemaOverride
- mysqld mysqlctl.MysqlDaemon
+ dbconfigs dbconfigs.DBConfigs
+ mysqld mysqlctl.MysqlDaemon
// The following variables should only be accessed within
// the context of a startRequest-endRequest.
- qe *QueryEngine
- invalidator *RowcacheInvalidator
+ qe *QueryEngine
// checkMySQLThrottler is used to throttle the number of
// requests sent to CheckMySQL.
@@ -149,7 +147,6 @@ func NewTabletServer(config Config) *TabletServer {
history: history.New(10),
}
tsv.qe = NewQueryEngine(tsv, config)
- tsv.invalidator = NewRowcacheInvalidator(config.StatsPrefix, tsv, tsv.qe, config.EnablePublishStats)
if config.EnablePublishStats {
stats.Publish(config.StatsPrefix+"TabletState", stats.IntFunc(func() int64 {
tsv.mu.Lock()
@@ -233,7 +230,7 @@ func (tsv *TabletServer) IsServing() bool {
// InitDBConfig inititalizes the db config variables for TabletServer. You must call this function before
// calling StartService or SetServingType.
-func (tsv *TabletServer) InitDBConfig(target querypb.Target, dbconfigs dbconfigs.DBConfigs, schemaOverrides []SchemaOverride, mysqld mysqlctl.MysqlDaemon) error {
+func (tsv *TabletServer) InitDBConfig(target querypb.Target, dbconfigs dbconfigs.DBConfigs, mysqld mysqlctl.MysqlDaemon) error {
tsv.mu.Lock()
defer tsv.mu.Unlock()
if tsv.state != StateNotConnected {
@@ -241,17 +238,16 @@ func (tsv *TabletServer) InitDBConfig(target querypb.Target, dbconfigs dbconfigs
}
tsv.target = target
tsv.dbconfigs = dbconfigs
- tsv.schemaOverrides = schemaOverrides
tsv.mysqld = mysqld
return nil
}
// StartService is a convenience function for InitDBConfig->SetServingType
// with serving=true.
-func (tsv *TabletServer) StartService(target querypb.Target, dbconfigs dbconfigs.DBConfigs, schemaOverrides []SchemaOverride, mysqld mysqlctl.MysqlDaemon) (err error) {
+func (tsv *TabletServer) StartService(target querypb.Target, dbconfigs dbconfigs.DBConfigs, mysqld mysqlctl.MysqlDaemon) (err error) {
// Save tablet type away to prevent data races
tabletType := target.TabletType
- err = tsv.InitDBConfig(target, dbconfigs, schemaOverrides, mysqld)
+ err = tsv.InitDBConfig(target, dbconfigs, mysqld)
if err != nil {
return err
}
@@ -364,7 +360,7 @@ func (tsv *TabletServer) fullStart() (err error) {
}
c.Close()
- tsv.qe.Open(tsv.dbconfigs, tsv.schemaOverrides)
+ tsv.qe.Open(tsv.dbconfigs)
return tsv.serveNewType()
}
@@ -377,24 +373,10 @@ func (tsv *TabletServer) serveNewType() (err error) {
err = x.(error)
}
}()
-
- if tsv.needInvalidator(tsv.target) {
- tsv.invalidator.Open(tsv.dbconfigs.App.DbName, tsv.mysqld)
- } else {
- tsv.invalidator.Close()
- }
tsv.transition(StateServing)
return nil
}
-// needInvalidator returns true if the rowcache invalidator needs to be enabled.
-func (tsv *TabletServer) needInvalidator(target querypb.Target) bool {
- if !tsv.config.RowCache.Enabled {
- return false
- }
- return target.TabletType != topodatapb.TabletType_MASTER
-}
-
func (tsv *TabletServer) gracefulStop() {
defer close(tsv.setTimeBomb())
tsv.waitForShutdown()
@@ -424,8 +406,6 @@ func (tsv *TabletServer) StopService() {
tsv.transition(StateNotConnected)
}()
log.Infof("Shutting down query service")
-
- tsv.invalidator.Close()
tsv.qe.Close()
}
@@ -579,7 +559,7 @@ func (tsv *TabletServer) Commit(ctx context.Context, target *querypb.Target, tra
tsv.endRequest(false)
}(time.Now())
- tsv.qe.Commit(ctx, logStats, transactionID)
+ tsv.qe.txPool.Commit(ctx, transactionID)
return nil
}
@@ -1314,16 +1294,6 @@ func (tsv *TabletServer) MaxDMLRows() int {
return int(tsv.qe.maxDMLRows.Get())
}
-// SetSpotCheckRatio sets the spot check ration.
-func (tsv *TabletServer) SetSpotCheckRatio(val float64) {
- tsv.qe.spotCheckFreq.Set(int64(val * spotCheckMultiplier))
-}
-
-// SpotCheckRatio returns the spot check ratio.
-func (tsv *TabletServer) SpotCheckRatio() float64 {
- return float64(tsv.qe.spotCheckFreq.Get()) / spotCheckMultiplier
-}
-
func init() {
rand.Seed(time.Now().UnixNano())
}
diff --git a/go/vt/tabletserver/tabletserver_test.go b/go/vt/tabletserver/tabletserver_test.go
index e2ba378a72e..7d3875fc6ef 100644
--- a/go/vt/tabletserver/tabletserver_test.go
+++ b/go/vt/tabletserver/tabletserver_test.go
@@ -63,32 +63,13 @@ func TestTabletServerAllowQueriesFailBadConn(t *testing.T) {
checkTabletServerState(t, tsv, StateNotConnected)
dbconfigs := testUtils.newDBConfigs(db)
target := querypb.Target{TabletType: topodatapb.TabletType_MASTER}
- err := tsv.StartService(target, dbconfigs, []SchemaOverride{}, testUtils.newMysqld(&dbconfigs))
+ err := tsv.StartService(target, dbconfigs, testUtils.newMysqld(&dbconfigs))
if err == nil {
t.Fatalf("TabletServer.StartService should fail")
}
checkTabletServerState(t, tsv, StateNotConnected)
}
-func TestTabletServerAllowQueriesFailStrictModeConflictWithRowCache(t *testing.T) {
- db := setUpTabletServerTest()
- testUtils := newTestUtils()
- config := testUtils.newQueryServiceConfig()
- // disable strict mode
- config.StrictMode = false
- // enable rowcache
- config.RowCache.Enabled = true
- tsv := NewTabletServer(config)
- checkTabletServerState(t, tsv, StateNotConnected)
- dbconfigs := testUtils.newDBConfigs(db)
- target := querypb.Target{TabletType: topodatapb.TabletType_MASTER}
- err := tsv.StartService(target, dbconfigs, []SchemaOverride{}, testUtils.newMysqld(&dbconfigs))
- if err == nil {
- t.Fatalf("TabletServer.StartService should fail because strict mode is disabled while rowcache is enabled.")
- }
- checkTabletServerState(t, tsv, StateNotConnected)
-}
-
func TestTabletServerAllowQueries(t *testing.T) {
db := setUpTabletServerTest()
testUtils := newTestUtils()
@@ -98,14 +79,14 @@ func TestTabletServerAllowQueries(t *testing.T) {
dbconfigs := testUtils.newDBConfigs(db)
tsv.setState(StateServing)
target := querypb.Target{TabletType: topodatapb.TabletType_MASTER}
- err := tsv.StartService(target, dbconfigs, []SchemaOverride{}, testUtils.newMysqld(&dbconfigs))
+ err := tsv.StartService(target, dbconfigs, testUtils.newMysqld(&dbconfigs))
tsv.StopService()
want := "InitDBConfig failed"
if err == nil || !strings.Contains(err.Error(), want) {
t.Fatalf("TabletServer.StartService: %v, must contain %s", err, want)
}
tsv.setState(StateShuttingDown)
- err = tsv.StartService(target, dbconfigs, []SchemaOverride{}, testUtils.newMysqld(&dbconfigs))
+ err = tsv.StartService(target, dbconfigs, testUtils.newMysqld(&dbconfigs))
if err == nil {
t.Fatalf("TabletServer.StartService should fail")
}
@@ -120,13 +101,13 @@ func TestTabletServerInitDBConfig(t *testing.T) {
tsv.setState(StateServing)
target := querypb.Target{TabletType: topodatapb.TabletType_MASTER}
dbconfigs := testUtils.newDBConfigs(db)
- err := tsv.InitDBConfig(target, dbconfigs, nil, nil)
+ err := tsv.InitDBConfig(target, dbconfigs, nil)
want := "InitDBConfig failed"
if err == nil || !strings.Contains(err.Error(), want) {
t.Errorf("InitDBConfig: %v, must contain %s", err, want)
}
tsv.setState(StateNotConnected)
- err = tsv.InitDBConfig(target, dbconfigs, nil, nil)
+ err = tsv.InitDBConfig(target, dbconfigs, nil)
if err != nil {
t.Error(err)
}
@@ -139,7 +120,7 @@ func TestDecideAction(t *testing.T) {
tsv := NewTabletServer(config)
target := querypb.Target{TabletType: topodatapb.TabletType_MASTER}
dbconfigs := testUtils.newDBConfigs(db)
- err := tsv.InitDBConfig(target, dbconfigs, nil, nil)
+ err := tsv.InitDBConfig(target, dbconfigs, nil)
if err != nil {
t.Error(err)
}
@@ -245,7 +226,7 @@ func TestSetServingType(t *testing.T) {
tsv := NewTabletServer(config)
dbconfigs := testUtils.newDBConfigs(db)
target := querypb.Target{TabletType: topodatapb.TabletType_MASTER}
- err := tsv.InitDBConfig(target, dbconfigs, []SchemaOverride{}, testUtils.newMysqld(&dbconfigs))
+ err := tsv.InitDBConfig(target, dbconfigs, testUtils.newMysqld(&dbconfigs))
if err != nil {
t.Error(err)
}
@@ -344,7 +325,7 @@ func TestTabletServerSingleSchemaFailure(t *testing.T) {
dbconfigs := testUtils.newDBConfigs(db)
target := querypb.Target{TabletType: topodatapb.TabletType_MASTER}
originalSchemaErrorCount := tsv.qe.queryServiceStats.InternalErrors.Counts()["Schema"]
- err := tsv.StartService(target, dbconfigs, []SchemaOverride{}, testUtils.newMysqld(&dbconfigs))
+ err := tsv.StartService(target, dbconfigs, testUtils.newMysqld(&dbconfigs))
defer tsv.StopService()
if err != nil {
t.Fatalf("TabletServer should successfully start even if a table's schema is unloadable, but got error: %v", err)
@@ -391,7 +372,7 @@ func TestTabletServerAllSchemaFailure(t *testing.T) {
tsv := NewTabletServer(config)
dbconfigs := testUtils.newDBConfigs(db)
target := querypb.Target{TabletType: topodatapb.TabletType_MASTER}
- err := tsv.StartService(target, dbconfigs, []SchemaOverride{}, testUtils.newMysqld(&dbconfigs))
+ err := tsv.StartService(target, dbconfigs, testUtils.newMysqld(&dbconfigs))
defer tsv.StopService()
// tabletsever shouldn't start if it can't access schema for any tables
testUtils.checkTabletError(t, err, vtrpcpb.ErrorCode_INTERNAL_ERROR, "could not get schema for any tables")
@@ -404,7 +385,7 @@ func TestTabletServerCheckMysql(t *testing.T) {
tsv := NewTabletServer(config)
dbconfigs := testUtils.newDBConfigs(db)
target := querypb.Target{TabletType: topodatapb.TabletType_MASTER}
- err := tsv.StartService(target, dbconfigs, []SchemaOverride{}, testUtils.newMysqld(&dbconfigs))
+ err := tsv.StartService(target, dbconfigs, testUtils.newMysqld(&dbconfigs))
defer tsv.StopService()
if err != nil {
t.Fatal(err)
@@ -432,7 +413,7 @@ func TestTabletServerCheckMysqlFailInvalidConn(t *testing.T) {
tsv := NewTabletServer(config)
dbconfigs := testUtils.newDBConfigs(db)
target := querypb.Target{TabletType: topodatapb.TabletType_MASTER}
- err := tsv.StartService(target, dbconfigs, []SchemaOverride{}, testUtils.newMysqld(&dbconfigs))
+ err := tsv.StartService(target, dbconfigs, testUtils.newMysqld(&dbconfigs))
defer tsv.StopService()
if err != nil {
t.Fatalf("TabletServer.StartService should succeed, but got error: %v", err)
@@ -480,7 +461,7 @@ func TestTabletServerReconnect(t *testing.T) {
tsv := NewTabletServer(config)
dbconfigs := testUtils.newDBConfigs(db)
target := querypb.Target{TabletType: topodatapb.TabletType_MASTER}
- err := tsv.StartService(target, dbconfigs, []SchemaOverride{}, testUtils.newMysqld(&dbconfigs))
+ err := tsv.StartService(target, dbconfigs, testUtils.newMysqld(&dbconfigs))
defer tsv.StopService()
if tsv.GetState() != "SERVING" {
@@ -507,7 +488,7 @@ func TestTabletServerReconnect(t *testing.T) {
// make mysql conn work
db.DisableConnFail()
- err = tsv.StartService(target, dbconfigs, []SchemaOverride{}, testUtils.newMysqld(&dbconfigs))
+ err = tsv.StartService(target, dbconfigs, testUtils.newMysqld(&dbconfigs))
if err != nil {
t.Error(err)
}
@@ -528,7 +509,7 @@ func TestTabletServerTarget(t *testing.T) {
Shard: "test_shard",
TabletType: topodatapb.TabletType_MASTER,
}
- err := tsv.StartService(target1, dbconfigs, []SchemaOverride{}, testUtils.newMysqld(&dbconfigs))
+ err := tsv.StartService(target1, dbconfigs, testUtils.newMysqld(&dbconfigs))
if err != nil {
t.Fatalf("StartService failed: %v", err)
}
@@ -604,7 +585,7 @@ func TestTabletServerCommitTransaciton(t *testing.T) {
tsv := NewTabletServer(config)
dbconfigs := testUtils.newDBConfigs(db)
target := querypb.Target{TabletType: topodatapb.TabletType_MASTER}
- err := tsv.StartService(target, dbconfigs, []SchemaOverride{}, testUtils.newMysqld(&dbconfigs))
+ err := tsv.StartService(target, dbconfigs, testUtils.newMysqld(&dbconfigs))
if err != nil {
t.Fatalf("StartService failed: %v", err)
}
@@ -638,7 +619,7 @@ func TestTabletServerRollback(t *testing.T) {
tsv := NewTabletServer(config)
dbconfigs := testUtils.newDBConfigs(db)
target := querypb.Target{TabletType: topodatapb.TabletType_MASTER}
- err := tsv.StartService(target, dbconfigs, []SchemaOverride{}, testUtils.newMysqld(&dbconfigs))
+ err := tsv.StartService(target, dbconfigs, testUtils.newMysqld(&dbconfigs))
if err != nil {
t.Fatalf("StartService failed: %v", err)
}
@@ -673,7 +654,7 @@ func TestTabletServerStreamExecute(t *testing.T) {
tsv := NewTabletServer(config)
dbconfigs := testUtils.newDBConfigs(db)
target := querypb.Target{TabletType: topodatapb.TabletType_MASTER}
- err := tsv.StartService(target, dbconfigs, []SchemaOverride{}, testUtils.newMysqld(&dbconfigs))
+ err := tsv.StartService(target, dbconfigs, testUtils.newMysqld(&dbconfigs))
if err != nil {
t.Fatalf("StartService failed: %v", err)
}
@@ -699,7 +680,7 @@ func TestTabletServerExecuteBatch(t *testing.T) {
tsv := NewTabletServer(config)
dbconfigs := testUtils.newDBConfigs(db)
target := querypb.Target{TabletType: topodatapb.TabletType_MASTER}
- err := tsv.StartService(target, dbconfigs, []SchemaOverride{}, testUtils.newMysqld(&dbconfigs))
+ err := tsv.StartService(target, dbconfigs, testUtils.newMysqld(&dbconfigs))
if err != nil {
t.Fatalf("StartService failed: %v", err)
}
@@ -723,7 +704,7 @@ func TestTabletServerExecuteBatchFailEmptyQueryList(t *testing.T) {
tsv := NewTabletServer(config)
dbconfigs := testUtils.newDBConfigs(db)
target := querypb.Target{TabletType: topodatapb.TabletType_MASTER}
- err := tsv.StartService(target, dbconfigs, []SchemaOverride{}, testUtils.newMysqld(&dbconfigs))
+ err := tsv.StartService(target, dbconfigs, testUtils.newMysqld(&dbconfigs))
if err != nil {
t.Fatalf("StartService failed: %v", err)
}
@@ -740,7 +721,7 @@ func TestTabletServerExecuteBatchFailAsTransaction(t *testing.T) {
tsv := NewTabletServer(config)
dbconfigs := testUtils.newDBConfigs(db)
target := querypb.Target{TabletType: topodatapb.TabletType_MASTER}
- err := tsv.StartService(target, dbconfigs, []SchemaOverride{}, testUtils.newMysqld(&dbconfigs))
+ err := tsv.StartService(target, dbconfigs, testUtils.newMysqld(&dbconfigs))
if err != nil {
t.Fatalf("StartService failed: %v", err)
}
@@ -764,7 +745,7 @@ func TestTabletServerExecuteBatchBeginFail(t *testing.T) {
tsv := NewTabletServer(config)
dbconfigs := testUtils.newDBConfigs(db)
target := querypb.Target{TabletType: topodatapb.TabletType_MASTER}
- err := tsv.StartService(target, dbconfigs, []SchemaOverride{}, testUtils.newMysqld(&dbconfigs))
+ err := tsv.StartService(target, dbconfigs, testUtils.newMysqld(&dbconfigs))
if err != nil {
t.Fatalf("StartService failed: %v", err)
}
@@ -789,7 +770,7 @@ func TestTabletServerExecuteBatchCommitFail(t *testing.T) {
tsv := NewTabletServer(config)
dbconfigs := testUtils.newDBConfigs(db)
target := querypb.Target{TabletType: topodatapb.TabletType_MASTER}
- err := tsv.StartService(target, dbconfigs, []SchemaOverride{}, testUtils.newMysqld(&dbconfigs))
+ err := tsv.StartService(target, dbconfigs, testUtils.newMysqld(&dbconfigs))
if err != nil {
t.Fatalf("StartService failed: %v", err)
}
@@ -827,7 +808,7 @@ func TestTabletServerExecuteBatchSqlExecFailInTransaction(t *testing.T) {
tsv := NewTabletServer(config)
dbconfigs := testUtils.newDBConfigs(db)
target := querypb.Target{TabletType: topodatapb.TabletType_MASTER}
- err := tsv.StartService(target, dbconfigs, []SchemaOverride{}, testUtils.newMysqld(&dbconfigs))
+ err := tsv.StartService(target, dbconfigs, testUtils.newMysqld(&dbconfigs))
if err != nil {
t.Fatalf("StartService failed: %v", err)
}
@@ -869,7 +850,7 @@ func TestTabletServerExecuteBatchSqlSucceedInTransaction(t *testing.T) {
tsv := NewTabletServer(config)
dbconfigs := testUtils.newDBConfigs(db)
target := querypb.Target{TabletType: topodatapb.TabletType_MASTER}
- err := tsv.StartService(target, dbconfigs, []SchemaOverride{}, testUtils.newMysqld(&dbconfigs))
+ err := tsv.StartService(target, dbconfigs, testUtils.newMysqld(&dbconfigs))
if err != nil {
t.Fatalf("StartService failed: %v", err)
}
@@ -892,7 +873,7 @@ func TestTabletServerExecuteBatchCallCommitWithoutABegin(t *testing.T) {
tsv := NewTabletServer(config)
dbconfigs := testUtils.newDBConfigs(db)
target := querypb.Target{TabletType: topodatapb.TabletType_MASTER}
- err := tsv.StartService(target, dbconfigs, []SchemaOverride{}, testUtils.newMysqld(&dbconfigs))
+ err := tsv.StartService(target, dbconfigs, testUtils.newMysqld(&dbconfigs))
if err != nil {
t.Fatalf("StartService failed: %v", err)
}
@@ -921,7 +902,7 @@ func TestExecuteBatchNestedTransaction(t *testing.T) {
tsv := NewTabletServer(config)
dbconfigs := testUtils.newDBConfigs(db)
target := querypb.Target{TabletType: topodatapb.TabletType_MASTER}
- err := tsv.StartService(target, dbconfigs, []SchemaOverride{}, testUtils.newMysqld(&dbconfigs))
+ err := tsv.StartService(target, dbconfigs, testUtils.newMysqld(&dbconfigs))
if err != nil {
t.Fatalf("StartService failed: %v", err)
}
@@ -984,7 +965,7 @@ func TestTabletServerSplitQuery(t *testing.T) {
tsv := NewTabletServer(config)
dbconfigs := testUtils.newDBConfigs(db)
target := querypb.Target{TabletType: topodatapb.TabletType_MASTER}
- err := tsv.StartService(target, dbconfigs, []SchemaOverride{}, testUtils.newMysqld(&dbconfigs))
+ err := tsv.StartService(target, dbconfigs, testUtils.newMysqld(&dbconfigs))
if err != nil {
t.Fatalf("StartService failed: %v", err)
}
@@ -1016,7 +997,7 @@ func TestTabletServerSplitQueryV2(t *testing.T) {
tsv := NewTabletServer(config)
dbconfigs := testUtils.newDBConfigs(db)
target := querypb.Target{TabletType: topodatapb.TabletType_RDONLY}
- err := tsv.StartService(target, dbconfigs, []SchemaOverride{}, testUtils.newMysqld(&dbconfigs))
+ err := tsv.StartService(target, dbconfigs, testUtils.newMysqld(&dbconfigs))
if err != nil {
t.Fatalf("StartService failed: %v", err)
}
@@ -1070,7 +1051,7 @@ func TestTabletServerSplitQueryInvalidQuery(t *testing.T) {
tsv := NewTabletServer(config)
dbconfigs := testUtils.newDBConfigs(db)
target := querypb.Target{TabletType: topodatapb.TabletType_MASTER}
- err := tsv.StartService(target, dbconfigs, []SchemaOverride{}, testUtils.newMysqld(&dbconfigs))
+ err := tsv.StartService(target, dbconfigs, testUtils.newMysqld(&dbconfigs))
if err != nil {
t.Fatalf("StartService failed: %v", err)
}
@@ -1091,7 +1072,7 @@ func TestTabletServerSplitQueryV2InvalidQuery(t *testing.T) {
tsv := NewTabletServer(config)
dbconfigs := testUtils.newDBConfigs(db)
target := querypb.Target{TabletType: topodatapb.TabletType_RDONLY}
- err := tsv.StartService(target, dbconfigs, []SchemaOverride{}, testUtils.newMysqld(&dbconfigs))
+ err := tsv.StartService(target, dbconfigs, testUtils.newMysqld(&dbconfigs))
if err != nil {
t.Fatalf("StartService failed: %v", err)
}
@@ -1148,7 +1129,7 @@ func TestTabletServerSplitQueryInvalidMinMax(t *testing.T) {
tsv := NewTabletServer(config)
dbconfigs := testUtils.newDBConfigs(db)
target := querypb.Target{TabletType: topodatapb.TabletType_MASTER}
- err := tsv.StartService(target, dbconfigs, []SchemaOverride{}, testUtils.newMysqld(&dbconfigs))
+ err := tsv.StartService(target, dbconfigs, testUtils.newMysqld(&dbconfigs))
if err != nil {
t.Fatalf("StartService failed: %v", err)
}
@@ -1169,7 +1150,7 @@ func TestTabletServerSplitQueryV2InvalidParams(t *testing.T) {
tsv := NewTabletServer(config)
dbconfigs := testUtils.newDBConfigs(db)
target := querypb.Target{TabletType: topodatapb.TabletType_RDONLY}
- err := tsv.StartService(target, dbconfigs, []SchemaOverride{}, testUtils.newMysqld(&dbconfigs))
+ err := tsv.StartService(target, dbconfigs, testUtils.newMysqld(&dbconfigs))
if err != nil {
t.Fatalf("StartService failed: %v", err)
}
@@ -1288,7 +1269,7 @@ func TestConfigChanges(t *testing.T) {
tsv := NewTabletServer(config)
dbconfigs := testUtils.newDBConfigs(db)
target := querypb.Target{TabletType: topodatapb.TabletType_MASTER}
- err := tsv.StartService(target, dbconfigs, []SchemaOverride{}, testUtils.newMysqld(&dbconfigs))
+ err := tsv.StartService(target, dbconfigs, testUtils.newMysqld(&dbconfigs))
if err != nil {
t.Fatalf("StartService failed: %v", err)
}
@@ -1362,36 +1343,6 @@ func TestConfigChanges(t *testing.T) {
if val := int(tsv.qe.maxDMLRows.Get()); val != newSize {
t.Errorf("tsv.qe.maxDMLRows.Get: %d, want %d", val, newSize)
}
-
- tsv.SetSpotCheckRatio(0.5)
- if val := tsv.SpotCheckRatio(); val != 0.5 {
- t.Errorf("tsv.SpotCheckRatio: %f, want 0.5", val)
- }
- if val := tsv.qe.spotCheckFreq.Get(); val != int64(0.5*spotCheckMultiplier) {
- t.Errorf("tsv.qe.spotCheckFreq.Get: %d, want %d", val, int64(0.5*spotCheckMultiplier))
- }
-}
-
-func TestNeedInvalidator(t *testing.T) {
- testUtils := newTestUtils()
- config := testUtils.newQueryServiceConfig()
- tsv := NewTabletServer(config)
-
- tsv.config.RowCache.Enabled = false
- target := querypb.Target{TabletType: topodatapb.TabletType_REPLICA}
- if tsv.needInvalidator(target) {
- t.Errorf("got true, want false")
- }
-
- tsv.config.RowCache.Enabled = true
- if !tsv.needInvalidator(target) {
- t.Errorf("got false, want true")
- }
-
- target.TabletType = topodatapb.TabletType_MASTER
- if tsv.needInvalidator(target) {
- t.Errorf("got true, want false")
- }
}
func setUpTabletServerTest() *fakesqldb.DB {
diff --git a/go/vt/tabletserver/tabletservermock/controller.go b/go/vt/tabletserver/tabletservermock/controller.go
index 7b2090244eb..e02ffb7fcd7 100644
--- a/go/vt/tabletserver/tabletservermock/controller.go
+++ b/go/vt/tabletserver/tabletservermock/controller.go
@@ -88,7 +88,7 @@ func (tqsc *Controller) AddStatusPart() {
}
// InitDBConfig is part of the tabletserver.Controller interface
-func (tqsc *Controller) InitDBConfig(target querypb.Target, dbConfigs dbconfigs.DBConfigs, schemaOverrides []tabletserver.SchemaOverride, mysqld mysqlctl.MysqlDaemon) error {
+func (tqsc *Controller) InitDBConfig(target querypb.Target, dbConfigs dbconfigs.DBConfigs, mysqld mysqlctl.MysqlDaemon) error {
tqsc.mu.Lock()
defer tqsc.mu.Unlock()
diff --git a/go/vt/tabletserver/testutils_test.go b/go/vt/tabletserver/testutils_test.go
index 878c9064493..082feb5549c 100644
--- a/go/vt/tabletserver/testutils_test.go
+++ b/go/vt/tabletserver/testutils_test.go
@@ -111,11 +111,8 @@ func (util *testUtils) newQueryServiceConfig() Config {
config := DefaultQsConfig
config.StatsPrefix = fmt.Sprintf("Stats-%d-", randID)
config.DebugURLPrefix = fmt.Sprintf("/debug-%d-", randID)
- config.RowCache.StatsPrefix = fmt.Sprintf("Stats-%d-", randID)
config.PoolNamePrefix = fmt.Sprintf("Pool-%d-", randID)
config.StrictMode = true
- config.RowCache.Binary = "ls"
- config.RowCache.Connections = 100
config.EnablePublishStats = false
return config
}
@@ -145,11 +142,9 @@ func newTestSchemaInfo(
queryCacheSize,
reloadTime,
idleTimeout,
- newTestSchemaInfoCachePool(enablePublishStats, queryServiceStats),
map[string]string{
debugQueryPlansKey: fmt.Sprintf("/debug/query_plans_%d", randID),
debugQueryStatsKey: fmt.Sprintf("/debug/query_stats_%d", randID),
- debugTableStatsKey: fmt.Sprintf("/debug/table_stats_%d", randID),
debugSchemaKey: fmt.Sprintf("/debug/schema_%d", randID),
},
enablePublishStats,
diff --git a/go/vt/tabletserver/tx_pool.go b/go/vt/tabletserver/tx_pool.go
index a66e4d84e4c..dd369883784 100644
--- a/go/vt/tabletserver/tx_pool.go
+++ b/go/vt/tabletserver/tx_pool.go
@@ -170,22 +170,15 @@ func (axp *TxPool) Begin(ctx context.Context) int64 {
return transactionID
}
-// SafeCommit commits the specified transaction. Unlike other functions, it
-// returns an error on failure instead of panic. The connection becomes free
-// and can be reused in the future.
-func (axp *TxPool) SafeCommit(ctx context.Context, transactionID int64) (invalidList map[string]DirtyKeys, err error) {
- defer handleError(&err, nil, axp.queryServiceStats)
-
+// Commit commits the specified transaction.
+func (axp *TxPool) Commit(ctx context.Context, transactionID int64) {
conn := axp.Get(transactionID)
defer conn.discard(TxCommit)
- // Assign this upfront to make sure we always return the invalidList.
- invalidList = conn.dirtyTables
axp.txStats.Add("Completed", time.Now().Sub(conn.StartTime))
- if _, fetchErr := conn.Exec(ctx, "commit", 1, false); fetchErr != nil {
+ if _, err := conn.Exec(ctx, "commit", 1, false); err != nil {
conn.Close()
- err = NewTabletErrorSQL(vtrpcpb.ErrorCode_UNKNOWN_ERROR, fetchErr)
+ panic(NewTabletErrorSQL(vtrpcpb.ErrorCode_UNKNOWN_ERROR, err))
}
- return
}
// Rollback rolls back the specified transaction.
@@ -235,8 +228,7 @@ func (axp *TxPool) SetTimeout(timeout time.Duration) {
axp.ticks.SetInterval(timeout / 10)
}
-// TxConnection is meant for executing transactions. It keeps track
-// of dirty keys for rowcache invalidation. It can return itself to
+// TxConnection is meant for executing transactions. It can return itself to
// the tx pool correctly. It also does not retry statements if there
// are failures.
type TxConnection struct {
@@ -246,7 +238,6 @@ type TxConnection struct {
inUse bool
StartTime time.Time
EndTime time.Time
- dirtyTables map[string]DirtyKeys
Queries []string
Conclusion string
LogToFile sync2.AtomicInt32
@@ -260,24 +251,12 @@ func newTxConnection(conn *DBConn, transactionID int64, pool *TxPool, immediate
TransactionID: transactionID,
pool: pool,
StartTime: time.Now(),
- dirtyTables: make(map[string]DirtyKeys),
Queries: make([]string, 0, 8),
ImmediateCallerID: immediate,
EffectiveCallerID: effective,
}
}
-// DirtyKeys returns the list of rowcache keys that became dirty
-// during the transaction.
-func (txc *TxConnection) DirtyKeys(tableName string) DirtyKeys {
- if list, ok := txc.dirtyTables[tableName]; ok {
- return list
- }
- list := make(DirtyKeys)
- txc.dirtyTables[tableName] = list
- return list
-}
-
// Exec executes the statement for the current transaction.
func (txc *TxConnection) Exec(ctx context.Context, query string, maxrows int, wantfields bool) (*sqltypes.Result, error) {
r, err := txc.DBConn.ExecOnce(ctx, query, maxrows, wantfields)
@@ -347,12 +326,3 @@ func (txc *TxConnection) Format(params url.Values) string {
strings.Join(txc.Queries, ";"),
)
}
-
-// DirtyKeys provides a cache-like interface, where
-// it just adds keys to its likst as Delete gets called.
-type DirtyKeys map[string]bool
-
-// Delete just keeps track of what needs to be deleted
-func (dk DirtyKeys) Delete(key string) {
- dk[key] = true
-}
diff --git a/go/vt/tabletserver/tx_pool_test.go b/go/vt/tabletserver/tx_pool_test.go
index 22384da0079..1df1b0bf9e1 100644
--- a/go/vt/tabletserver/tx_pool_test.go
+++ b/go/vt/tabletserver/tx_pool_test.go
@@ -24,6 +24,7 @@ func TestTxPoolExecuteCommit(t *testing.T) {
sql := fmt.Sprintf("alter table %s add test_column int", tableName)
db := fakesqldb.Register()
db.AddQuery("begin", &sqltypes.Result{})
+ db.AddQuery("commit", &sqltypes.Result{})
db.AddQuery(sql, &sqltypes.Result{})
txPool := newTxPool(true)
@@ -35,13 +36,10 @@ func TestTxPoolExecuteCommit(t *testing.T) {
ctx := context.Background()
transactionID := txPool.Begin(ctx)
txConn := txPool.Get(transactionID)
- defer txPool.SafeCommit(ctx, transactionID)
+ defer txPool.Commit(ctx, transactionID)
txConn.RecordQuery(sql)
_, err := txConn.Exec(ctx, sql, 1, true)
txConn.Recycle()
- txConn.DirtyKeys(tableName)
- dk := txConn.DirtyKeys(tableName)
- dk.Delete(tableName)
if err != nil {
t.Fatalf("got error: %v", err)
}
@@ -186,10 +184,8 @@ func TestTxPoolSafeCommitFail(t *testing.T) {
if err != nil {
t.Fatalf("got exec error: %v", err)
}
- _, err = txPool.SafeCommit(ctx, transactionID)
- if err == nil {
- t.Fatalf("comit should get exec failure")
- }
+ defer handleAndVerifyTabletError(t, "commit should get exec failure", vtrpcpb.ErrorCode_UNKNOWN_ERROR)
+ txPool.Commit(ctx, transactionID)
}
func TestTxPoolRollbackFail(t *testing.T) {
diff --git a/go/vt/vtgate/planbuilder/dml.go b/go/vt/vtgate/planbuilder/dml.go
index 0bfa9c04f46..c7a406a89be 100644
--- a/go/vt/vtgate/planbuilder/dml.go
+++ b/go/vt/vtgate/planbuilder/dml.go
@@ -66,7 +66,7 @@ func generateQuery(statement sqlparser.Statement) string {
func isIndexChanging(setClauses sqlparser.UpdateExprs, colVindexes []*vindexes.ColumnVindex) bool {
for _, assignment := range setClauses {
for _, vcol := range colVindexes {
- if vcol.Column.Equal(cistring.CIString(assignment.Name.Name)) {
+ if vcol.Column.Equal(cistring.CIString(assignment.Name)) {
return true
}
}
diff --git a/go/vt/vtgate/planbuilder/insert.go b/go/vt/vtgate/planbuilder/insert.go
index 1d553e1126d..0c42260ad02 100644
--- a/go/vt/vtgate/planbuilder/insert.go
+++ b/go/vt/vtgate/planbuilder/insert.go
@@ -109,14 +109,14 @@ func buildAutoIncrementPlan(ins *sqlparser.Insert, autoinc *vindexes.AutoIncreme
func findOrInsertPos(ins *sqlparser.Insert, col cistring.CIString) (row sqlparser.ValTuple, pos int) {
pos = -1
for i, column := range ins.Columns {
- if col.Equal(cistring.CIString(sqlparser.GetColName(column.(*sqlparser.NonStarExpr).Expr))) {
+ if col.Equal(cistring.CIString(column)) {
pos = i
break
}
}
if pos == -1 {
pos = len(ins.Columns)
- ins.Columns = append(ins.Columns, &sqlparser.NonStarExpr{Expr: &sqlparser.ColName{Name: sqlparser.ColIdent(col)}})
+ ins.Columns = append(ins.Columns, sqlparser.ColIdent(col))
ins.Rows.(sqlparser.Values)[0] = append(ins.Rows.(sqlparser.Values)[0].(sqlparser.ValTuple), &sqlparser.NullVal{})
}
return ins.Rows.(sqlparser.Values)[0].(sqlparser.ValTuple), pos
diff --git a/go/vt/vttest/environment.go b/go/vt/vttest/environment.go
index 16b8412838c..f4190be4763 100644
--- a/go/vt/vttest/environment.go
+++ b/go/vt/vttest/environment.go
@@ -21,11 +21,6 @@ func launcherPath() (string, error) {
return path.Join(vttop, "py/vttest/run_local_database.py"), nil
}
-// MemcachedPath returns the path to the memcached binary.
-func MemcachedPath() string {
- return "memcached"
-}
-
func vtgateProtocol() string {
return "grpc"
}
diff --git a/test/config.json b/test/config.json
index 91bb23a5a51..c254bc71e2d 100644
--- a/test/config.json
+++ b/test/config.json
@@ -239,15 +239,6 @@
"worker_test"
]
},
- "rowcache_invalidator": {
- "File": "rowcache_invalidator.py",
- "Args": [],
- "Command": [],
- "Manual": false,
- "Shard": 4,
- "RetryMax": 0,
- "Tags": []
- },
"schema": {
"File": "schema.py",
"Args": [],
diff --git a/test/environment.py b/test/environment.py
index a594f038dc6..3ecf9c68feb 100644
--- a/test/environment.py
+++ b/test/environment.py
@@ -28,7 +28,7 @@
# sanity check the environment
if os.environ['USER'] == 'root':
sys.stderr.write(
- 'ERROR: Vitess and its dependencies (mysqld and memcached) '
+ 'ERROR: Vitess and mysqld '
'should not be run as root.\n')
sys.exit(1)
if 'VTTOP' not in os.environ:
@@ -72,13 +72,6 @@
run_local_database = os.path.join(vtroot, 'py-vtdb', 'vttest',
'run_local_database.py')
-
-def memcached_bin():
- in_vt = os.path.join(vtroot, 'bin', 'memcached')
- if os.path.exists(in_vt):
- return in_vt
- return 'memcached'
-
# url to hit to force the logs to flush.
flush_logs_url = '/debug/flushlogs'
diff --git a/test/rowcache_invalidator.py b/test/rowcache_invalidator.py
deleted file mode 100755
index df55818a627..00000000000
--- a/test/rowcache_invalidator.py
+++ /dev/null
@@ -1,309 +0,0 @@
-#!/usr/bin/env python
-
-import warnings
-
-import json
-import time
-import urllib2
-
-import logging
-import unittest
-
-import environment
-import tablet
-import utils
-
-# Dropping a table inexplicably produces a warning despite
-# the "IF EXISTS" clause. Squelch these warnings.
-warnings.simplefilter('ignore')
-
-master_tablet = tablet.Tablet()
-replica_tablet = tablet.Tablet()
-# Second replica to provide semi-sync ACKs while testing
-# scenarios when the first replica is down.
-replica2_tablet = tablet.Tablet()
-
-all_tablets = [master_tablet, replica_tablet, replica2_tablet]
-
-create_vt_insert_test = '''create table vt_insert_test (
-id bigint auto_increment,
-msg varchar(64),
-primary key (id)
-) Engine=InnoDB'''
-
-
-def setUpModule():
- try:
- environment.topo_server().setup()
-
- # start mysql instance external to the test
- utils.wait_procs([t.init_mysql() for t in all_tablets])
-
- # start a vtctld so the vtctl insert commands are just RPCs, not forks
- utils.Vtctld().start()
-
- # Start up a master mysql and vttablet
- logging.debug('Setting up tablets')
- utils.run_vtctl(['CreateKeyspace', 'test_keyspace'])
- master_tablet.init_tablet('master', 'test_keyspace', '0')
- replica_tablet.init_tablet('replica', 'test_keyspace', '0')
- replica2_tablet.init_tablet('replica', 'test_keyspace', '0')
- utils.validate_topology()
-
- for t in all_tablets:
- t.populate('vt_test_keyspace', create_vt_insert_test)
-
- for t in all_tablets:
- t.start_vttablet(memcache=True, wait_for_state=None)
- master_tablet.wait_for_vttablet_state('SERVING')
- for t in [replica_tablet, replica2_tablet]:
- t.wait_for_vttablet_state('NOT_SERVING')
-
- utils.run_vtctl(['InitShardMaster', 'test_keyspace/0',
- master_tablet.tablet_alias], auto_log=True)
- utils.validate_topology()
-
- # restart the replica tablet so the stats are reset
- replica_tablet.kill_vttablet()
- replica_tablet.start_vttablet(memcache=True)
- except:
- tearDownModule()
- raise
-
-
-def tearDownModule():
- utils.required_teardown()
- if utils.options.skip_teardown:
- return
- logging.debug('Tearing down the servers and setup')
- tablet.kill_tablets(all_tablets)
- utils.wait_procs([t.teardown_mysql() for t in all_tablets],
- raise_on_error=False)
-
- environment.topo_server().teardown()
- utils.kill_sub_processes()
- utils.remove_tmp_files()
- for t in all_tablets:
- t.remove_tree()
-
-
-class MultiDict(dict):
-
- def __getattr__(self, name):
- v = self[name]
- if type(v) == dict:
- v = MultiDict(v)
- return v
-
- def mget(self, mkey, default=None):
- keys = mkey.split('.')
- try:
- v = self
- for key in keys:
- v = v[key]
- except KeyError:
- v = default
- if type(v) == dict:
- v = MultiDict(v)
- return v
-
-
-class RowCacheInvalidator(unittest.TestCase):
-
- def setUp(self):
- self.perform_insert(400)
-
- def tearDown(self):
- self.perform_delete()
-
- def replica_stats(self):
- url = 'http://localhost:%d/debug/table_stats' % replica_tablet.port
- return MultiDict(json.load(urllib2.urlopen(url)))
-
- def replica_vars(self):
- url = 'http://localhost:%d/debug/vars' % replica_tablet.port
- return MultiDict(json.load(urllib2.urlopen(url)))
-
- def perform_insert(self, count):
- for i in xrange(count):
- self._exec_vt_txn(
- "insert into vt_insert_test (msg) values ('test %s')" % i)
-
- def perform_delete(self):
- self._exec_vt_txn('delete from vt_insert_test')
-
- def _wait_for_replica(self):
- master_position = utils.mysql_query(master_tablet.tablet_uid,
- 'vt_test_keyspace',
- 'show master status')
- replica_tablet.mquery('vt_test_keyspace',
- "select MASTER_POS_WAIT('%s', %d)" %
- (master_position[0][0], master_position[0][1]), 5)
-
- def test_cache_invalidation(self):
- self._wait_for_replica()
- invalidations = self.replica_stats()['Totals']['Invalidations']
- invalidator_stats = self.replica_vars()
- logging.debug(
- 'Invalidations %d InvalidatorStats %s',
- invalidations, invalidator_stats['RowcacheInvalidatorPosition'])
- self.assertTrue(
- invalidations > 0, 'Invalidations are not flowing through.')
-
- res = replica_tablet.mquery('vt_test_keyspace',
- 'select min(id) from vt_insert_test')
- self.assertNotEqual(res[0][0], None,
- 'Cannot proceed, no rows in vt_insert_test')
- mid = int(res[0][0])
- stats_dict = self.replica_stats()['vt_insert_test']
- logging.debug('vt_insert_test stats %s', stats_dict)
- misses = stats_dict['Misses']
- hits = stats_dict['Hits']
- replica_tablet.execute('select * from vt_insert_test where id=:id',
- bindvars={'id': mid})
- stats_dict = self.replica_stats()['vt_insert_test']
- self.assertEqual(stats_dict['Misses'] - misses, 1,
- "This shouldn't have hit the cache")
-
- replica_tablet.execute('select * from vt_insert_test where id=:id',
- bindvars={'id': mid})
- stats_dict = self.replica_stats()['vt_insert_test']
- self.assertEqual(stats_dict['Hits'] - hits, 1,
- 'This should have hit the cache')
-
- def _wait_for_value(self, expected_result):
- timeout = 10
- while True:
- result = self._exec_replica_query(
- 'select * from vt_insert_test where id = 1000000')
- if result == expected_result:
- return
- timeout = utils.wait_step(
- 'replica rowcache updated, got %s expected %s' %
- (str(result), str(expected_result)), timeout,
- sleep_time=0.1)
-
- def test_outofband_statements(self):
- start = self.replica_vars()['InternalErrors'].get('Invalidation', 0)
-
- # Test update statement
- self._exec_vt_txn(
- "insert into vt_insert_test (id, msg) values (1000000, 'start')")
- self._wait_for_replica()
- self._wait_for_value([[1000000, 'start']])
- utils.mysql_write_query(
- master_tablet.tablet_uid,
- 'vt_test_keyspace',
- "update vt_insert_test set msg = 'foo' where id = 1000000")
- self._wait_for_replica()
- self._wait_for_value([[1000000, 'foo']])
- end1 = self.replica_vars()['InternalErrors'].get('Invalidation', 0)
- self.assertEqual(start, end1)
-
- # Test delete statement
- utils.mysql_write_query(master_tablet.tablet_uid,
- 'vt_test_keyspace',
- 'delete from vt_insert_test where id = 1000000')
- self._wait_for_replica()
- self._wait_for_value([])
- end2 = self.replica_vars()['InternalErrors'].get('Invalidation', 0)
- self.assertEqual(end1, end2)
-
- # Test insert statement
- utils.mysql_write_query(
- master_tablet.tablet_uid,
- 'vt_test_keyspace',
- "insert into vt_insert_test (id, msg) values(1000000, 'bar')")
- self._wait_for_replica()
- self._wait_for_value([[1000000, 'bar']])
- end3 = self.replica_vars()['InternalErrors'].get('Invalidation', 0)
- self.assertEqual(end2, end3)
-
- # Test unrecognized statement
- utils.mysql_query(master_tablet.tablet_uid,
- 'vt_test_keyspace',
- 'truncate table vt_insert_test')
- self._wait_for_replica()
- timeout = 10
- while True:
- end4 = self.replica_vars()['InternalErrors'].get('Invalidation', 0)
- if end4 == end3+1:
- break
- timeout = utils.wait_step('invalidation errors, got %d expecting %d' %
- (end4, end3+1), timeout, sleep_time=0.1)
- self.assertEqual(end4, end3+1)
-
- def test_stop_replication(self):
- # wait for replication to catch up.
- self._wait_for_replica()
-
- # restart the replica tablet so the stats are reset
- replica_tablet.kill_vttablet()
- replica_tablet.start_vttablet(memcache=True)
-
- # insert 100 values, should cause 100 invalidations
- self.perform_insert(100)
- self._wait_for_replica()
-
- # wait until the slave processed all data
- timeout = 30
- while True:
- inv_count1 = self.replica_stats()['Totals']['Invalidations']
- if inv_count1 == 100:
- break
- timeout = utils.wait_step('invalidation count, got %d expecting %d' %
- (inv_count1, 100), timeout, sleep_time=0.1)
-
- # stop replication insert more data, restart replication
- replica_tablet.mquery('vt_test_keyspace', 'stop slave')
- self.perform_insert(100)
- time.sleep(2)
- replica_tablet.mquery('vt_test_keyspace', 'start slave')
- self._wait_for_replica()
-
- # wait until the slave processed all data
- timeout = 30
- while True:
- inv_count2 = self.replica_stats()['Totals']['Invalidations']
- if inv_count2 == 200:
- break
- timeout = utils.wait_step('invalidation count, got %d expecting %d' %
- (inv_count2, 200), timeout, sleep_time=0.1)
-
- # check and display some stats
- invalidator_stats = self.replica_vars()
- logging.debug('invalidator_stats %s',
- invalidator_stats['RowcacheInvalidatorPosition'])
- self.assertEqual(invalidator_stats['RowcacheInvalidatorState'], 'Running',
- 'Row-cache invalidator should be enabled')
-
- def test_cache_hit(self):
- res = replica_tablet.mquery('vt_test_keyspace',
- 'select min(id) from vt_insert_test')
- self.assertNotEqual(res[0][0], None,
- 'Cannot proceed, no rows in vt_insert_test')
- mid = int(res[0][0])
- stats_dict = self.replica_stats()['vt_insert_test']
- misses = stats_dict['Misses']
- hits = stats_dict['Hits']
- replica_tablet.execute('select * from vt_insert_test where id=:id',
- bindvars={'id': mid})
- stats_dict = self.replica_stats()['vt_insert_test']
- self.assertEqual(stats_dict['Misses'] - misses, 1,
- "This shouldn't have hit the cache")
-
- replica_tablet.execute('select * from vt_insert_test where id=:id',
- bindvars={'id': mid})
- hits2 = self.replica_stats()['vt_insert_test']['Hits']
- self.assertEqual(hits2 - hits, 1, 'This should have hit the cache')
-
- def _exec_vt_txn(self, query):
- master_tablet.execute(query, auto_log=False)
-
- def _exec_replica_query(self, query):
- result = replica_tablet.execute(query, auto_log=False)
- return result['rows']
-
-
-if __name__ == '__main__':
- utils.main()
diff --git a/test/tablet.py b/test/tablet.py
index 6ac2b26ab7d..09e7f0febcd 100644
--- a/test/tablet.py
+++ b/test/tablet.py
@@ -397,7 +397,7 @@ def flush(self):
stderr=utils.devnull, stdout=utils.devnull)
def start_vttablet(
- self, port=None, memcache=False,
+ self, port=None,
wait_for_state='SERVING', filecustomrules=None, zkcustomrules=None,
schema_override=None,
repl_extra_flags=None, table_acl_config=None,
@@ -497,12 +497,6 @@ def start_vttablet(
self._add_dbconfigs(args, repl_extra_flags)
- if memcache:
- args.extend(['-rowcache-bin', environment.memcached_bin()])
- memcache_socket = os.path.join(self.tablet_dir, 'memcache.sock')
- args.extend(['-rowcache-socket', memcache_socket])
- args.extend(['-enable-rowcache'])
-
if filecustomrules:
args.extend(['-filecustomrules', filecustomrules])
if zkcustomrules: