Merge branch '1.9' into 1.10
locker committed Aug 27, 2018
2 parents 2011b26 + 87f9be4 commit b43c89b
Showing 8 changed files with 186 additions and 3 deletions.
3 changes: 2 additions & 1 deletion src/box/applier.cc
@@ -602,7 +602,8 @@ applier_f(va_list ap)
applier_log_error(applier, e);
applier_disconnect(applier, APPLIER_LOADING);
goto reconnect;
} else if (e->errcode() == ER_ACCESS_DENIED) {
} else if (e->errcode() == ER_ACCESS_DENIED ||
e->errcode() == ER_NO_SUCH_USER) {
/* Invalid configuration */
applier_log_error(applier, e);
applier_disconnect(applier, APPLIER_DISCONNECTED);
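This hunk makes the applier treat ER_NO_SUCH_USER the same way as ER_ACCESS_DENIED: the error is logged and the connection is dropped, so a replica whose replication user has not been created yet keeps reconnecting instead of exiting (exercised by the gh-3637 test below). A minimal sketch of the scenario, with placeholder host, port and credentials:

-- Sketch only; the 'cluster' user may not exist on the master yet.
-- With this change the applier logs ER_NO_SUCH_USER and keeps
-- retrying instead of terminating the replica.
box.cfg({
    listen = 3302,
    replication = 'cluster:pass@127.0.0.1:3301',
    replication_connect_timeout = 30,
})
-- Meanwhile, on the master:
--   box.schema.user.create('cluster', {password = 'pass'})
--   box.schema.user.grant('cluster', 'replication')
-- Once the user exists, the replica connects without a restart.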
4 changes: 2 additions & 2 deletions src/box/lua/tuple.c
@@ -296,12 +296,12 @@ lbox_tuple_to_map(struct lua_State *L)

const struct tuple *tuple = lua_checktuple(L, 1);
const struct tuple_format *format = tuple_format(tuple);
const struct tuple_field *field = &format->fields[0];
const char *pos = tuple_data(tuple);
int field_count = (int)mp_decode_array(&pos);
int n_named = format->dict->name_count;
lua_createtable(L, field_count, n_named);
for (int i = 0; i < n_named; ++i, ++field) {
int named_and_presented = MIN(field_count, n_named);
for (int i = 0; i < named_and_presented; ++i) {
/* Access by name. */
const char *name = format->dict->names[i];
lua_pushstring(L, name);
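The loop now runs over MIN(field_count, n_named) fields, so tomap() stops at the end of the tuple's MsgPack data instead of decoding past it when the tuple is shorter than its format (e.g. unset nullable trailing fields). A minimal sketch of the resulting behavior, with assumed space and field names; the full case is in test/engine/tuple.test.lua below:

-- Space and field names here are illustrative only.
local s = box.schema.space.create('tomap_demo', {format = {
    {'first', 'unsigned'},
    {'second', 'unsigned'},
    {'third', 'string', is_nullable = true},
}})
s:create_index('primary', {parts = {1, 'unsigned'}})
local t = s:insert({1, 2})       -- 'third' is left unset
local m = t:tomap()
assert(m.first == 1 and m.second == 2)
assert(m.third == nil)           -- no entry for the missing nullable field
s:drop()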
5 changes: 5 additions & 0 deletions src/lua/socket.c
@@ -677,6 +677,11 @@ static int
lbox_socket_push_addr(struct lua_State *L,
const struct sockaddr *addr, socklen_t alen)
{
if (alen == 0) {
lua_pushnil(L);
return 1;
}

lua_newtable(L);

lua_pushliteral(L, "family");
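With a zero address length the binding now pushes nil rather than building a table from an uninitialized sockaddr, so Lua callers of address-returning socket methods should be prepared for nil. A hedged usage sketch using Tarantool's built-in socket module:

local socket = require('socket')

local s = socket('AF_INET', 'SOCK_DGRAM', 'udp')
-- name()/peer()-style calls may now return nil instead of a table
-- with garbage fields, so check before indexing.
local addr = s:name()
if addr ~= nil then
    print(addr.family, addr.host, addr.port)
else
    print('address not available')
end
s:close()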
63 changes: 63 additions & 0 deletions test/engine/tuple.result
@@ -891,6 +891,69 @@ t["{"]
s:drop()
---
...
--
-- gh-3631: Wrong 'tomap' work with nullable fields
--
format = {}
---
...
format[1] = {'first', 'unsigned'}
---
...
format[2] = {'second', 'unsigned'}
---
...
format[3] = {'third', 'unsigned'}
---
...
format[4] = {'fourth', 'string', is_nullable = true}
---
...
s = box.schema.space.create('test', {format = format, engine = engine})
---
...
pk = s:create_index('primary', {parts = {1, 'unsigned'}})
---
...
s:insert({1, 2, 3})
---
- [1, 2, 3]
...
tuple = s:get(1)
---
...
tuple
---
- [1, 2, 3]
...
-- Should be NULL
tuple.fourth
---
- null
...
-- Should have only three named fields
tuple:tomap()
---
- 1: 1
  2: 2
  3: 3
  third: 3
  second: 2
  first: 1
...
-- Should be NULL
tuple:tomap().fourth
---
- null
...
-- Should be nil
type(tuple:tomap().fourth)
---
- nil
...
s:drop()
---
...
engine = nil
---
...
23 changes: 23 additions & 0 deletions test/engine/tuple.test.lua
@@ -289,5 +289,28 @@ t["{"]

s:drop()

--
-- gh-3631: Wrong 'tomap' work with nullable fields
--
format = {}
format[1] = {'first', 'unsigned'}
format[2] = {'second', 'unsigned'}
format[3] = {'third', 'unsigned'}
format[4] = {'fourth', 'string', is_nullable = true}
s = box.schema.space.create('test', {format = format, engine = engine})
pk = s:create_index('primary', {parts = {1, 'unsigned'}})
s:insert({1, 2, 3})
tuple = s:get(1)
tuple
-- Should be NULL
tuple.fourth
-- Should have only three named fields
tuple:tomap()
-- Should be NULL
tuple:tomap().fourth
-- Should be nil
type(tuple:tomap().fourth)
s:drop()

engine = nil
test_run = nil
52 changes: 52 additions & 0 deletions test/replication/misc.result
@@ -264,3 +264,55 @@ test_run:cmd('cleanup server er_load2')
---
- true
...
--
-- Test case for gh-3637. Before the fix replica would exit with
-- an error. Now check that we don't hang and successfully connect.
--
fiber = require('fiber')
---
...
test_run:cleanup_cluster()
---
...
test_run:cmd("create server replica_auth with rpl_master=default, script='replication/replica_auth.lua'")
---
- true
...
test_run:cmd("start server replica_auth with wait=False, wait_load=False, args='cluster:pass 0.05'")
---
- true
...
-- Wait a bit to make sure replica waits till user is created.
fiber.sleep(0.1)
---
...
box.schema.user.create('cluster', {password='pass'})
---
...
box.schema.user.grant('cluster', 'replication')
---
...
while box.info.replication[2] == nil do fiber.sleep(0.01) end
---
...
vclock = test_run:get_vclock('default')
---
...
_ = test_run:wait_vclock('replica_auth', vclock)
---
...
test_run:cmd("stop server replica_auth")
---
- true
...
test_run:cmd("cleanup server replica_auth")
---
- true
...
test_run:cmd("delete server replica_auth")
---
- true
...
box.schema.user.drop('cluster')
---
...
25 changes: 25 additions & 0 deletions test/replication/misc.test.lua
@@ -103,3 +103,28 @@ test_run:cmd('stop server er_load1')
-- er_load2 exits automatically.
test_run:cmd('cleanup server er_load1')
test_run:cmd('cleanup server er_load2')

--
-- Test case for gh-3637. Before the fix replica would exit with
-- an error. Now check that we don't hang and successfully connect.
--
fiber = require('fiber')

test_run:cleanup_cluster()

test_run:cmd("create server replica_auth with rpl_master=default, script='replication/replica_auth.lua'")
test_run:cmd("start server replica_auth with wait=False, wait_load=False, args='cluster:pass 0.05'")
-- Wait a bit to make sure replica waits till user is created.
fiber.sleep(0.1)
box.schema.user.create('cluster', {password='pass'})
box.schema.user.grant('cluster', 'replication')

while box.info.replication[2] == nil do fiber.sleep(0.01) end
vclock = test_run:get_vclock('default')
_ = test_run:wait_vclock('replica_auth', vclock)

test_run:cmd("stop server replica_auth")
test_run:cmd("cleanup server replica_auth")
test_run:cmd("delete server replica_auth")

box.schema.user.drop('cluster')
14 changes: 14 additions & 0 deletions test/replication/replica_auth.lua
@@ -0,0 +1,14 @@
#!/usr/bin/env tarantool

local USER_PASS = arg[1]
local TIMEOUT = arg[2] and tonumber(arg[2]) or 0.1
local CON_TIMEOUT = arg[3] and tonumber(arg[3]) or 30.0

require('console').listen(os.getenv('ADMIN'))

box.cfg({
listen = os.getenv("LISTEN"),
replication = USER_PASS .. "@" .. os.getenv("MASTER"),
replication_timeout = TIMEOUT,
replication_connect_timeout = CON_TIMEOUT
})
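The misc test above starts this script through test-run with args='cluster:pass 0.05', so arg[1] carries the replication credentials and arg[2] the replication timeout, while arg[3] (the connect timeout) falls back to 30 seconds. A sketch of the effective configuration for that invocation, with assumed LISTEN/MASTER values:

-- Effective box.cfg for `replica_auth.lua cluster:pass 0.05`,
-- assuming LISTEN=3302 and MASTER=127.0.0.1:3301 in the environment:
box.cfg({
    listen = 3302,                                -- os.getenv("LISTEN")
    replication = 'cluster:pass@127.0.0.1:3301',  -- USER_PASS .. "@" .. MASTER
    replication_timeout = 0.05,                   -- arg[2]
    replication_connect_timeout = 30.0,           -- default, arg[3] not given
})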
