Skip to content
This repository has been archived by the owner on Jul 16, 2021. It is now read-only.

Commit

Permalink
{List,Create,Change}InputLogs Admin APIs (#1250)
Browse files Browse the repository at this point in the history
* Remove reference to deleted types.proto

* go generate ./...

* New {List,Create,Update}InputLogs Admin APIs

* Don't lose gRPC error codes

* Mutations SetWritable

* AdminServer implementation of *InputLogs APIs
  • Loading branch information
gdbelvin committed Mar 29, 2019
1 parent b6643f0 commit 65451bd
Show file tree
Hide file tree
Showing 13 changed files with 857 additions and 83 deletions.
92 changes: 65 additions & 27 deletions core/adminserver/admin_server.go
Expand Up @@ -83,8 +83,14 @@ var (
type LogsAdmin interface { type LogsAdmin interface {
// AddLogs creates and adds new logs for writing to a directory. // AddLogs creates and adds new logs for writing to a directory.
AddLogs(ctx context.Context, directoryID string, logIDs ...int64) error AddLogs(ctx context.Context, directoryID string, logIDs ...int64) error
// SetWritable enables or disables new writes from going to logID.
SetWritable(ctx context.Context, directoryID string, logID int64, enabled bool) error
// ListLogs returns a list of logs, optionally filtered by the writable bit.
ListLogs(ctx context.Context, directoryID string, writable bool) ([]int64, error)
} }


var _ pb.KeyTransparencyAdminServer = &Server{} // Ensure *Server satisfies the AdminServer interface.

// Server implements pb.KeyTransparencyAdminServer // Server implements pb.KeyTransparencyAdminServer
type Server struct { type Server struct {
tlog tpb.TrillianLogClient tlog tpb.TrillianLogClient
Expand Down Expand Up @@ -175,7 +181,7 @@ func privKeyOrGen(ctx context.Context, privKey *any.Any, keygen keys.ProtoGenera
if privKey != nil { if privKey != nil {
var keyProto ptypes.DynamicAny var keyProto ptypes.DynamicAny
if err := ptypes.UnmarshalAny(privKey, &keyProto); err != nil { if err := ptypes.UnmarshalAny(privKey, &keyProto); err != nil {
return nil, fmt.Errorf("failed to unmarshal privatekey: %v", err) return nil, status.Errorf(codes.InvalidArgument, "failed to unmarshal privatekey: %v", err)
} }
return keyProto.Message, nil return keyProto.Message, nil
} }
Expand Down Expand Up @@ -212,12 +218,12 @@ func (s *Server) CreateDirectory(ctx context.Context, in *pb.CreateDirectoryRequ


// Generate VRF key. // Generate VRF key.
wrapped, err := privKeyOrGen(ctx, in.GetVrfPrivateKey(), s.keygen) wrapped, err := privKeyOrGen(ctx, in.GetVrfPrivateKey(), s.keygen)
if err != nil { if s := status.Convert(err); s.Code() != codes.OK {
return nil, fmt.Errorf("adminserver: keygen(): %v", err) return nil, status.Errorf(s.Code(), "adminserver: keygen(): %v", s.Message())
} }
vrfPriv, err := p256.NewFromWrappedKey(ctx, wrapped) vrfPriv, err := p256.NewFromWrappedKey(ctx, wrapped)
if err != nil { if s := status.Convert(err); s.Code() != codes.OK {
return nil, fmt.Errorf("adminserver: NewFromWrappedKey(): %v", err) return nil, status.Errorf(s.Code(), "adminserver: NewFromWrappedKey(): %v", s.Message())
} }
vrfPublicPB, err := der.ToPublicProto(vrfPriv.Public()) vrfPublicPB, err := der.ToPublicProto(vrfPriv.Public())
if err != nil { if err != nil {
Expand All @@ -227,8 +233,8 @@ func (s *Server) CreateDirectory(ctx context.Context, in *pb.CreateDirectoryRequ
// Create Trillian keys. // Create Trillian keys.
logTreeArgs := treeConfig(logArgs, in.GetLogPrivateKey(), in.GetDirectoryId()) logTreeArgs := treeConfig(logArgs, in.GetLogPrivateKey(), in.GetDirectoryId())
logTree, err := client.CreateAndInitTree(ctx, logTreeArgs, s.logAdmin, s.tmap, s.tlog) logTree, err := client.CreateAndInitTree(ctx, logTreeArgs, s.logAdmin, s.tmap, s.tlog)
if err != nil { if s := status.Convert(err); s.Code() != codes.OK {
return nil, fmt.Errorf("adminserver: CreateTree(log): %v", err) return nil, status.Errorf(s.Code(), "adminserver: CreateTree(log): %v", s.Message())
} }
mapTreeArgs := treeConfig(mapArgs, in.GetMapPrivateKey(), in.GetDirectoryId()) mapTreeArgs := treeConfig(mapArgs, in.GetMapPrivateKey(), in.GetDirectoryId())
mapTree, err := client.CreateAndInitTree(ctx, mapTreeArgs, s.mapAdmin, s.tmap, s.tlog) mapTree, err := client.CreateAndInitTree(ctx, mapTreeArgs, s.mapAdmin, s.tmap, s.tlog)
Expand All @@ -240,12 +246,12 @@ func (s *Server) CreateDirectory(ctx context.Context, in *pb.CreateDirectoryRequ
return nil, status.Errorf(codes.Internal, "adminserver: CreateAndInitTree(map): %v", err) return nil, status.Errorf(codes.Internal, "adminserver: CreateAndInitTree(map): %v", err)
} }
minInterval, err := ptypes.Duration(in.MinInterval) minInterval, err := ptypes.Duration(in.MinInterval)
if err != nil { if s := status.Convert(err); s.Code() != codes.OK {
return nil, fmt.Errorf("adminserver: Duration(%v): %v", in.MinInterval, err) return nil, status.Errorf(s.Code(), "adminserver: Duration(%v): %v", in.MinInterval, s.Message())
} }
maxInterval, err := ptypes.Duration(in.MaxInterval) maxInterval, err := ptypes.Duration(in.MaxInterval)
if err != nil { if s := status.Convert(err); s.Code() != codes.OK {
return nil, fmt.Errorf("adminserver: Duration(%v): %v", in.MaxInterval, err) return nil, status.Errorf(s.Code(), "adminserver: Duration(%v): %v", in.MaxInterval, s.Message())
} }


// Initialize log with first map root. // Initialize log with first map root.
Expand All @@ -270,15 +276,15 @@ func (s *Server) CreateDirectory(ctx context.Context, in *pb.CreateDirectoryRequ
MinInterval: minInterval, MinInterval: minInterval,
MaxInterval: maxInterval, MaxInterval: maxInterval,
} }
if err := s.directories.Write(ctx, dir); err != nil { if s := status.Convert(s.directories.Write(ctx, dir)); s.Code() != codes.OK {
return nil, fmt.Errorf("adminserver: directories.Write(): %v", err) return nil, status.Errorf(s.Code(), "adminserver: directories.Write(): %v", s.Message())
} }


// Create initial logs for writing. // Create initial logs for writing.
// TODO(#1063): Additional logs can be added at a later point to support increased server load. // TODO(#1063): Additional logs can be added at a later point to support increased server load.
logIDs := []int64{1, 2} logIDs := []int64{1, 2}
if err := s.logsAdmin.AddLogs(ctx, in.GetDirectoryId(), logIDs...); err != nil { if s := status.Convert(s.logsAdmin.AddLogs(ctx, in.GetDirectoryId(), logIDs...)); s.Code() != codes.OK {
return nil, fmt.Errorf("adminserver: AddLogs(%+v): %v", logIDs, err) return nil, status.Errorf(s.Code(), "adminserver: AddLogs(%+v): %v", logIDs, s.Message())
} }


d := &pb.Directory{ d := &pb.Directory{
Expand All @@ -303,29 +309,29 @@ func (s *Server) initialize(ctx context.Context, logTree, mapTree *tpb.Tree) err
trustedRoot := types.LogRootV1{} // Automatically trust the first observed log root. trustedRoot := types.LogRootV1{} // Automatically trust the first observed log root.


logClient, err := client.NewFromTree(s.tlog, logTree, trustedRoot) logClient, err := client.NewFromTree(s.tlog, logTree, trustedRoot)
if err != nil { if s := status.Convert(err); s.Code() != codes.OK {
return fmt.Errorf("adminserver: could not create log client: %v", err) return status.Errorf(s.Code(), "adminserver: could not create log client: %v", s.Message())
} }


// Wait for the latest log root to become available. // Wait for the latest log root to become available.
logRoot, err := logClient.UpdateRoot(ctx) logRoot, err := logClient.UpdateRoot(ctx)
if err != nil { if s := status.Convert(err); s.Code() != codes.OK {
return fmt.Errorf("adminserver: UpdateRoot(): %v", err) return status.Errorf(s.Code(), "adminserver: UpdateRoot(): %v", s.Message())
} }


req := &tpb.GetSignedMapRootByRevisionRequest{MapId: mapID, Revision: 0} req := &tpb.GetSignedMapRootByRevisionRequest{MapId: mapID, Revision: 0}
// TODO(gbelvin): does this need to be in a retry loop? // TODO(gbelvin): does this need to be in a retry loop?
resp, err := s.tmap.GetSignedMapRootByRevision(ctx, req) resp, err := s.tmap.GetSignedMapRootByRevision(ctx, req)
if err != nil { if s := status.Convert(err); s.Code() != codes.OK {
return fmt.Errorf("adminserver: GetSignedMapRootByRevision(%v,0): %v", mapID, err) return status.Errorf(s.Code(), "adminserver: GetSignedMapRootByRevision(%v,0): %v", mapID, s.Message())
} }
mapVerifier, err := client.NewMapVerifierFromTree(mapTree) mapVerifier, err := client.NewMapVerifierFromTree(mapTree)
if err != nil { if s := status.Convert(err); s.Code() != codes.OK {
return fmt.Errorf("adminserver: NewMapVerifierFromTree(): %v", err) return status.Errorf(s.Code(), "adminserver: NewMapVerifierFromTree(): %v", s.Message())
} }
mapRoot, err := mapVerifier.VerifySignedMapRoot(resp.GetMapRoot()) mapRoot, err := mapVerifier.VerifySignedMapRoot(resp.GetMapRoot())
if err != nil { if s := status.Convert(err); s.Code() != codes.OK {
return fmt.Errorf("adminserver: VerifySignedMapRoot(): %v", err) return status.Errorf(s.Code(), "adminserver: VerifySignedMapRoot(): %v", s.Message())
} }


// If the tree is empty and the map is empty, // If the tree is empty and the map is empty,
Expand All @@ -336,8 +342,8 @@ func (s *Server) initialize(ctx context.Context, logTree, mapTree *tpb.Tree) err


glog.Infof("Initializing Trillian Log %v with empty map root", logID) glog.Infof("Initializing Trillian Log %v with empty map root", logID)


if err := logClient.AddSequencedLeafAndWait(ctx, resp.GetMapRoot().GetMapRoot(), int64(mapRoot.Revision)); err != nil { if s := status.Convert(logClient.AddSequencedLeafAndWait(ctx, resp.GetMapRoot().GetMapRoot(), int64(mapRoot.Revision))); s.Code() != codes.OK {
return fmt.Errorf("adminserver: log.AddSequencedLeaf(%v): %v", mapRoot.Revision, err) return status.Errorf(s.Code(), "adminserver: log.AddSequencedLeaf(%v): %v", mapRoot.Revision, s.Message())
} }
return nil return nil
} }
Expand Down Expand Up @@ -370,6 +376,38 @@ func (s *Server) UndeleteDirectory(ctx context.Context, in *pb.UndeleteDirectory
return nil, status.Errorf(codes.Unimplemented, "not implemented") return nil, status.Errorf(codes.Unimplemented, "not implemented")
} }


// ListInputLogs returns a list of input logs for a directory.
func (s *Server) ListInputLogs(ctx context.Context, in *pb.ListInputLogsRequest) (*pb.ListInputLogsResponse, error) {
logIDs, err := s.logsAdmin.ListLogs(ctx, in.GetDirectoryId(), in.GetFilterWritable())
if s := status.Convert(err); s.Code() != codes.OK {
return nil, status.Errorf(s.Code(), "adminserver: ListLogs(): %v", s.Message())
}
inputLogs := make([]*pb.InputLog, 0, len(logIDs))
for _, logID := range logIDs {
inputLogs = append(inputLogs, &pb.InputLog{LogId: logID, Writable: true})
}

return &pb.ListInputLogsResponse{Logs: inputLogs}, nil
}

// CreateInputLog returns the created log.
func (s *Server) CreateInputLog(ctx context.Context, in *pb.InputLog) (*pb.InputLog, error) {
err := s.logsAdmin.AddLogs(ctx, in.GetDirectoryId(), in.GetLogId())
if s := status.Convert(err); s.Code() != codes.OK {
return nil, status.Errorf(s.Code(), "adminserver: AddLogs(%+v): %v", in.GetLogId(), s.Message())
}
return &pb.InputLog{LogId: in.GetLogId(), Writable: true}, nil
}

// UpdateInputLog updates the write bit for an input log.
func (s *Server) UpdateInputLog(ctx context.Context, in *pb.InputLog) (*pb.InputLog, error) {
err := s.logsAdmin.SetWritable(ctx, in.GetDirectoryId(), in.GetLogId(), in.GetWritable())
if s := status.Convert(err); s.Code() != codes.OK {
return nil, status.Errorf(s.Code(), "adminserver: SetWritable(): %v", s.Message())
}
return in, nil
}

// GarbageCollect looks for directories that have been deleted before the specified timestamp and fully deletes them. // GarbageCollect looks for directories that have been deleted before the specified timestamp and fully deletes them.
func (s *Server) GarbageCollect(ctx context.Context, in *pb.GarbageCollectRequest) (*pb.GarbageCollectResponse, error) { func (s *Server) GarbageCollect(ctx context.Context, in *pb.GarbageCollectRequest) (*pb.GarbageCollectResponse, error) {
before, err := ptypes.Timestamp(in.GetBefore()) before, err := ptypes.Timestamp(in.GetBefore())
Expand Down
6 changes: 3 additions & 3 deletions core/adminserver/admin_server_test.go
Expand Up @@ -81,9 +81,9 @@ func (e *miniEnv) Close() {


type fakeQueueAdmin struct{} type fakeQueueAdmin struct{}


func (fakeQueueAdmin) AddLogs(ctx context.Context, directoryID string, logIDs ...int64) error { func (fakeQueueAdmin) AddLogs(_ context.Context, _ string, _ ...int64) error { return nil }
return nil func (fakeQueueAdmin) SetWritable(_ context.Context, _ string, _ int64, _ bool) error { return nil }
} func (fakeQueueAdmin) ListLogs(_ context.Context, _ string, _ bool) ([]int64, error) { return nil, nil }


func TestCreateDirectory(t *testing.T) { func TestCreateDirectory(t *testing.T) {
for _, tc := range []struct { for _, tc := range []struct {
Expand Down
2 changes: 0 additions & 2 deletions core/api/gen.go
Expand Up @@ -20,5 +20,3 @@ package api


//go:generate protoc -I=. -I=$GOPATH/src/github.com/google/trillian/ -I=$GOPATH/src/github.com/googleapis/googleapis/ -I=$GOPATH/src/github.com/google/tink/proto --go_out=,plugins=grpc:$GOPATH/src monitor/v1/monitor.proto //go:generate protoc -I=. -I=$GOPATH/src/github.com/google/trillian/ -I=$GOPATH/src/github.com/googleapis/googleapis/ -I=$GOPATH/src/github.com/google/tink/proto --go_out=,plugins=grpc:$GOPATH/src monitor/v1/monitor.proto
//go:generate protoc -I=. -I=$GOPATH/src/github.com/google/trillian/ -I=$GOPATH/src/github.com/googleapis/googleapis/ -I=$GOPATH/src/github.com/google/tink/proto --grpc-gateway_out=logtostderr=true:$GOPATH/src monitor/v1/monitor.proto //go:generate protoc -I=. -I=$GOPATH/src/github.com/google/trillian/ -I=$GOPATH/src/github.com/googleapis/googleapis/ -I=$GOPATH/src/github.com/google/tink/proto --grpc-gateway_out=logtostderr=true:$GOPATH/src monitor/v1/monitor.proto

//go:generate protoc -I=. -I=$GOPATH/src/github.com/google/trillian/ -I=$GOPATH/src/github.com/googleapis/googleapis -I=$GOPATH/src/github.com/google/tink/proto --go_out=:$GOPATH/src type/type.proto
13 changes: 13 additions & 0 deletions core/api/monitor/v1/monitor_go_proto/monitor.pb.go

Some generated files are not rendered by default. Learn more about how customized files appear on GitHub.

37 changes: 37 additions & 0 deletions core/api/v1/admin.proto
Expand Up @@ -93,6 +93,25 @@ message UndeleteDirectoryRequest {
string directory_id = 1; string directory_id = 1;
} }


message ListInputLogsRequest {
string directory_id = 1;
// filter_writable will only return writable logs when set.
bool filter_writable = 2;
}

message ListInputLogsResponse {
repeated InputLog logs = 1;
}

// InputLog is an input log for a directory.
message InputLog {
string directory_id = 1;
int64 log_id = 2;
// writable controls whether new log items will be sent to this log.
// writable is not set by ListInputLogs.
bool writable = 3;
}

// GarbageCollect request. // GarbageCollect request.
message GarbageCollectRequest { message GarbageCollectRequest {
// Soft-deleted directories with a deleted timestamp before this will be fully // Soft-deleted directories with a deleted timestamp before this will be fully
Expand Down Expand Up @@ -148,6 +167,24 @@ service KeyTransparencyAdmin {
delete: "/v1/directories/{directory_id}:undelete" delete: "/v1/directories/{directory_id}:undelete"
}; };
} }
// ListInputLogs returns a list of input logs for a directory.
rpc ListInputLogs(ListInputLogsRequest) returns (ListInputLogsResponse) {
option (google.api.http) = {
get: "/v1/directories/{directory_id}/inputlogs"
};
}
// CreateInputLog returns the created log.
rpc CreateInputLog(InputLog) returns (InputLog) {
option (google.api.http) = {
post: "/v1/directories/{directory_id}/inputlogs/{log_id}"
};
}
// UpdateInputLog updates the write bit for an input log.
rpc UpdateInputLog(InputLog) returns (InputLog) {
option (google.api.http) = {
put: "/v1/directories/{directory_id}/inputlogs/{log_id}"
};
}
// Fully delete soft-deleted directories that have been soft-deleted before // Fully delete soft-deleted directories that have been soft-deleted before
// the specified timestamp. // the specified timestamp.
rpc GarbageCollect(GarbageCollectRequest) returns (GarbageCollectResponse); rpc GarbageCollect(GarbageCollectRequest) returns (GarbageCollectResponse);
Expand Down

0 comments on commit 65451bd

Please sign in to comment.