Merge #44297 #44345
44297: roachtest: fix the deco test + re-implement the warning test r=tbg a=knz

Fixes #44276.

The test that checks for a warning on a double [rd]ecommission was
implemented in roachtest; this was invalid because roachtest also
runs against previous versions, where the warning does not exist yet.

This patch moves the check to a TCL interactive test instead.

Release note: None

44345: sql,ui: enable non-admin users to see their own jobs r=dt,spaskob a=knz

Informs #44335.

Release note (ui change): Non-admin users can now use the "Jobs
detail" page and see their own jobs.

Release note (sql change): Non-admin users can now query `SHOW JOBS`
and `crdb_internal.jobs` and see their own jobs.
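
For illustration, a minimal sketch of the new behavior (mirroring the
logic test added below, where `testuser` is a non-admin role):

    -- As a non-admin user, both of these now succeed and return only
    -- the jobs created by that user; previously, the admin role was
    -- required.
    SHOW JOBS;
    SELECT job_type, description, user_name FROM crdb_internal.jobs;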

Co-authored-by: Raphael 'kena' Poss <knz@thaumogen.net>
craig[bot] and knz committed Jan 27, 2020
3 parents 91bd480 + 29a428e + 606db1d commit a3cb002
Showing 6 changed files with 148 additions and 52 deletions.
2 changes: 1 addition & 1 deletion pkg/cli/interactive_tests/common.tcl
@@ -90,7 +90,7 @@ proc send_eof {} {
 # in `server_pid`.
 proc start_server {argv} {
   report "BEGIN START SERVER"
-  system "$argv start-single-node --insecure --pid-file=server_pid --listening-url-file=server_url --background -s=path=logs/db >>logs/expect-cmd.log 2>&1;
+  system "$argv start-single-node --insecure --max-sql-memory=128MB --pid-file=server_pid --listening-url-file=server_url --background -s=path=logs/db >>logs/expect-cmd.log 2>&1;
   $argv sql --insecure -e 'select 1'"
   report "START SERVER DONE"
 }
24 changes: 22 additions & 2 deletions pkg/cli/interactive_tests/test_multiple_nodes.tcl
@@ -6,10 +6,10 @@ start_server $argv

 start_test "Check that it is possible to add nodes to a server started with start-single-node"

-system "$argv start --insecure --port=26258 --http-port=8083 --pid-file=server_pid2 --background -s=path=logs/db2 --join=:26257 >>logs/expect-cmd.log 2>&1;
+system "$argv start --insecure --port=26258 --max-sql-memory=128MB --http-port=8083 --pid-file=server_pid2 --background -s=path=logs/db2 --join=:26257 >>logs/expect-cmd.log 2>&1;
 $argv sql -e 'select 1' --port=26258"

-system "$argv start --insecure --port=26259 --http-port=8084 --pid-file=server_pid3 --background -s=path=logs/db3 --join=:26257 >>logs/expect-cmd.log 2>&1;
+system "$argv start --insecure --port=26259 --max-sql-memory=128MB --http-port=8084 --pid-file=server_pid3 --background -s=path=logs/db3 --join=:26257 >>logs/expect-cmd.log 2>&1;
 $argv sql -e 'select 1' --port=26259"

 # Check the number of nodes
@@ -20,6 +20,26 @@ eexpect eof

 end_test

+
+start_test "Check that a double decommission prints out a warning"
+spawn $argv node decommission 2
+eexpect eof
+
+spawn $argv node decommission 2
+eexpect "warning: node 2 is already decommissioning or decommissioned"
+eexpect eof
+end_test
+
+start_test "Check that a double recommission prints out a warning"
+spawn $argv node recommission 2
+eexpect eof
+
+spawn $argv node recommission 2
+eexpect "warning: node 2 is not decommissioned"
+eexpect eof
+end_test
+
+
 # Kill the cluster. We don't care about what happens next in this test,
 # and this makes the test complete faster.
 system "kill -KILL `cat server_pid` `cat server_pid2` `cat server_pid3`"
18 changes: 0 additions & 18 deletions pkg/cmd/roachtest/decommission.go
@@ -440,29 +440,11 @@ func runDecommissionAcceptance(ctx context.Context, t *test, c *cluster) {
 		}
 	}

-	t.l.Printf("trying to decommission a second time\n")
-	if buf, err := decommission(ctx, 2, c.Node(1), "decommission"); err != nil {
-		t.Fatalf("decommission failed: %v", err)
-	} else {
-		if !strings.HasPrefix(buf, "warning: node 1 is already decommissioning or decommissioned") {
-			t.Fatalf("expected warning at start of duplicate decommission, got:\n%s", buf)
-		}
-	}
-
 	t.l.Printf("recommissioning first node (from third node)\n")
 	if _, err := decommission(ctx, 3, c.Node(1), "recommission"); err != nil {
 		t.Fatalf("recommission failed: %v", err)
 	}

-	t.l.Printf("trying to recommission a second time\n")
-	if buf, err := decommission(ctx, 3, c.Node(1), "recommission"); err != nil {
-		t.Fatalf("recommission failed: %v", err)
-	} else {
-		if !strings.HasPrefix(buf, "warning: node 1 is not decommissioned") {
-			t.Fatalf("expected warning at start of duplicate recommission, got:\n%s", buf)
-		}
-	}
-
 	t.l.Printf("decommissioning second node from third, using --wait=all\n")
 	{
 		o, err := decommission(ctx, 3, c.Node(2),
72 changes: 42 additions & 30 deletions pkg/server/admin_test.go
@@ -1250,14 +1250,16 @@ func TestAdminAPIJobs(t *testing.T) {
 		status   jobs.Status
 		details  jobspb.Details
 		progress jobspb.ProgressDetails
+		username string
 	}{
-		{1, jobs.StatusRunning, jobspb.RestoreDetails{}, jobspb.RestoreProgress{}},
-		{2, jobs.StatusRunning, jobspb.BackupDetails{}, jobspb.BackupProgress{}},
-		{3, jobs.StatusSucceeded, jobspb.BackupDetails{}, jobspb.BackupProgress{}},
-		{4, jobs.StatusRunning, jobspb.ChangefeedDetails{}, jobspb.ChangefeedProgress{}},
+		{1, jobs.StatusRunning, jobspb.RestoreDetails{}, jobspb.RestoreProgress{}, security.RootUser},
+		{2, jobs.StatusRunning, jobspb.BackupDetails{}, jobspb.BackupProgress{}, security.RootUser},
+		{3, jobs.StatusSucceeded, jobspb.BackupDetails{}, jobspb.BackupProgress{}, security.RootUser},
+		{4, jobs.StatusRunning, jobspb.ChangefeedDetails{}, jobspb.ChangefeedProgress{}, security.RootUser},
+		{5, jobs.StatusSucceeded, jobspb.BackupDetails{}, jobspb.BackupProgress{}, authenticatedUserNameNoAdmin},
 	}
 	for _, job := range testJobs {
-		payload := jobspb.Payload{Details: jobspb.WrapPayloadDetails(job.details)}
+		payload := jobspb.Payload{Username: job.username, Details: jobspb.WrapPayloadDetails(job.details)}
 		payloadBytes, err := protoutil.Marshal(&payload)
 		if err != nil {
 			t.Fatal(err)
@@ -1289,33 +1291,43 @@ func TestAdminAPIJobs(t *testing.T) {
 	const invalidJobType = math.MaxInt32

 	testCases := []struct {
-		uri         string
-		expectedIDs []int64
+		uri                    string
+		expectedIDsViaAdmin    []int64
+		expectedIDsViaNonAdmin []int64
 	}{
-		{"jobs", append([]int64{4, 3, 2, 1}, existingIDs...)},
-		{"jobs?limit=1", []int64{4}},
-		{"jobs?status=running", []int64{4, 2, 1}},
-		{"jobs?status=succeeded", append([]int64{3}, existingIDs...)},
-		{"jobs?status=pending", []int64{}},
-		{"jobs?status=garbage", []int64{}},
-		{fmt.Sprintf("jobs?type=%d", jobspb.TypeBackup), []int64{3, 2}},
-		{fmt.Sprintf("jobs?type=%d", jobspb.TypeRestore), []int64{1}},
-		{fmt.Sprintf("jobs?type=%d", invalidJobType), []int64{}},
-		{fmt.Sprintf("jobs?status=running&type=%d", jobspb.TypeBackup), []int64{2}},
+		{"jobs", append([]int64{5, 4, 3, 2, 1}, existingIDs...), []int64{5}},
+		{"jobs?limit=1", []int64{5}, []int64{5}},
+		{"jobs?status=running", []int64{4, 2, 1}, []int64{}},
+		{"jobs?status=succeeded", append([]int64{5, 3}, existingIDs...), []int64{5}},
+		{"jobs?status=pending", []int64{}, []int64{}},
+		{"jobs?status=garbage", []int64{}, []int64{}},
+		{fmt.Sprintf("jobs?type=%d", jobspb.TypeBackup), []int64{5, 3, 2}, []int64{5}},
+		{fmt.Sprintf("jobs?type=%d", jobspb.TypeRestore), []int64{1}, []int64{}},
+		{fmt.Sprintf("jobs?type=%d", invalidJobType), []int64{}, []int64{}},
+		{fmt.Sprintf("jobs?status=running&type=%d", jobspb.TypeBackup), []int64{2}, []int64{}},
 	}
-	for i, testCase := range testCases {
-		var res serverpb.JobsResponse
-		if err := getAdminJSONProto(s, testCase.uri, &res); err != nil {
-			t.Fatal(err)
-		}
-		resIDs := []int64{}
-		for _, job := range res.Jobs {
-			resIDs = append(resIDs, job.ID)
-		}
-		if e, a := testCase.expectedIDs, resIDs; !reflect.DeepEqual(e, a) {
-			t.Errorf("%d: expected job IDs %v, but got %v", i, e, a)
-		}
-	}
+
+	testutils.RunTrueAndFalse(t, "isAdmin", func(t *testing.T, isAdmin bool) {
+		for i, testCase := range testCases {
+			var res serverpb.JobsResponse
+			if err := getAdminJSONProtoWithAdminOption(s, testCase.uri, &res, isAdmin); err != nil {
+				t.Fatal(err)
+			}
+			resIDs := []int64{}
+			for _, job := range res.Jobs {
+				resIDs = append(resIDs, job.ID)
+			}
+
+			expected := testCase.expectedIDsViaAdmin
+			if !isAdmin {
+				expected = testCase.expectedIDsViaNonAdmin
+			}
+
+			if e, a := expected, resIDs; !reflect.DeepEqual(e, a) {
+				t.Errorf("%d: expected job IDs %v, but got %v", i, e, a)
+			}
+		}
+	})
 }

 func TestAdminAPILocations(t *testing.T) {
20 changes: 19 additions & 1 deletion pkg/sql/crdb_internal.go
@@ -453,10 +453,18 @@ CREATE TABLE crdb_internal.jobs (
   coordinator_id INT
 )`,
 	populate: func(ctx context.Context, p *planner, _ *DatabaseDescriptor, addRow func(...tree.Datum) error) error {
+		currentUser := p.SessionData().User
+		isAdmin, err := p.HasAdminRole(ctx)
+		if err != nil {
+			return err
+		}
+
+		// Beware: we're querying system.jobs as root; we need to be careful to filter
+		// out results that the current user is not able to see.
 		query := `SELECT id, status, created, payload, progress FROM system.jobs`
 		rows, err := p.ExtendedEvalContext().ExecCfg.InternalExecutor.QueryEx(
 			ctx, "crdb-internal-jobs-table", p.txn,
-			sqlbase.InternalExecutorSessionDataOverride{User: p.SessionData().User},
+			sqlbase.InternalExecutorSessionDataOverride{User: security.RootUser},
 			query)
 		if err != nil {
 			return err
@@ -472,6 +480,16 @@ CREATE TABLE crdb_internal.jobs (

 			// Extract data from the payload.
 			payload, err := jobs.UnmarshalPayload(payloadBytes)
+
+			// Filter out masked rows before allocating all the datums,
+			// to avoid needless allocation.
+			sameUser := payload != nil && payload.Username == currentUser
+			if canAccess := isAdmin || sameUser; !canAccess {
+				// This user is neither an admin nor the user who created the
+				// job. They cannot see this row.
+				continue
+			}
+
 			if err != nil {
 				errorStr = tree.NewDString(fmt.Sprintf("error decoding payload: %v", err))
 			} else {
64 changes: 64 additions & 0 deletions pkg/sql/logictest/testdata/logic_test/jobs
@@ -0,0 +1,64 @@
+# These tests verify that a user's jobs are visible via
+# crdb_internal.jobs and SHOW JOBS.
+
+user root
+
+statement ok
+GRANT ALL ON DATABASE test TO testuser
+
+statement ok
+CREATE TABLE t(x INT); INSERT INTO t(x) VALUES (1); CREATE INDEX ON t(x)
+
+query TTT
+SELECT job_type, description, user_name FROM [SHOW JOBS]
+----
+SCHEMA CHANGE  CREATE INDEX ON test.public.t (x)  root
+
+query TTT
+SELECT job_type, description, user_name FROM crdb_internal.jobs
+----
+SCHEMA CHANGE  CREATE INDEX ON test.public.t (x)  root
+
+user testuser
+
+# A non-admin user cannot see the admin's jobs.
+
+query TTT
+SELECT job_type, description, user_name FROM [SHOW JOBS]
+----
+
+query TTT
+SELECT job_type, description, user_name FROM crdb_internal.jobs
+----
+
+# However they can see their own jobs.
+
+statement ok
+CREATE TABLE u(x INT); INSERT INTO u(x) VALUES (1); CREATE INDEX ON u(x);
+
+
+query TTT
+SELECT job_type, description, user_name FROM [SHOW JOBS]
+----
+SCHEMA CHANGE  CREATE INDEX ON test.public.u (x)  testuser
+
+query TTT
+SELECT job_type, description, user_name FROM crdb_internal.jobs
+----
+SCHEMA CHANGE  CREATE INDEX ON test.public.u (x)  testuser
+
+# And root can see both.
+
+user root
+
+query TTT
+SELECT job_type, description, user_name FROM [SHOW JOBS]
+----
+SCHEMA CHANGE  CREATE INDEX ON test.public.t (x)  root
+SCHEMA CHANGE  CREATE INDEX ON test.public.u (x)  testuser
+
+query TTT
+SELECT job_type, description, user_name FROM crdb_internal.jobs
+----
+SCHEMA CHANGE  CREATE INDEX ON test.public.t (x)  root
+SCHEMA CHANGE  CREATE INDEX ON test.public.u (x)  testuser
