| Original file line number | Diff line number | Diff line change |
|---|---|---|
| @@ -0,0 +1,176 @@ | ||
| package influxdb | ||
|
|
||
| import ( | ||
| "context" | ||
| "fmt" | ||
| "log" | ||
| "math" | ||
|
|
||
| "github.com/influxdata/flux" | ||
| "github.com/influxdata/flux/execute" | ||
| "github.com/influxdata/flux/memory" | ||
| "github.com/influxdata/flux/semantic" | ||
| ) | ||
|
|
||
// source performs storage reads
type source struct {
	id       execute.DatasetID
	reader   Reader
	readSpec ReadSpec
	window   execute.Window
	bounds   execute.Bounds
	alloc    *memory.Allocator

	// ts holds the downstream transformations fed by this source.
	ts []execute.Transformation

	// currentTime is the stop time of the next window to read.
	currentTime execute.Time
	// overflow is set once advancing currentTime would overflow
	// execute.Time; next() returns no more data after that.
	overflow bool
}
|
|
||
| func NewSource(id execute.DatasetID, r Reader, readSpec ReadSpec, bounds execute.Bounds, w execute.Window, currentTime execute.Time, alloc *memory.Allocator) execute.Source { | ||
| return &source{ | ||
| id: id, | ||
| reader: r, | ||
| readSpec: readSpec, | ||
| bounds: bounds, | ||
| window: w, | ||
| currentTime: currentTime, | ||
| alloc: alloc, | ||
| } | ||
| } | ||
|
|
||
| func (s *source) AddTransformation(t execute.Transformation) { | ||
| s.ts = append(s.ts, t) | ||
| } | ||
|
|
||
| func (s *source) Run(ctx context.Context) { | ||
| err := s.run(ctx) | ||
| for _, t := range s.ts { | ||
| t.Finish(s.id, err) | ||
| } | ||
| } | ||
|
|
||
// run drives the read loop: it fetches each successive window of
// tables from the reader, pushes every table through each downstream
// transformation, and advances the transformations' watermarks after
// each window. It returns the first error encountered, or nil once
// next() reports no more data.
func (s *source) run(ctx context.Context) error {
	//TODO(nathanielc): Pass through context to actual network I/O.
	for tables, mark, ok := s.next(ctx); ok; tables, mark, ok = s.next(ctx) {
		err := tables.Do(func(tbl flux.Table) error {
			for _, t := range s.ts {
				if err := t.Process(s.id, tbl); err != nil {
					return err
				}
				//TODO(nathanielc): Also add mechanism to send UpdateProcessingTime calls, when no data is arriving.
				// This is probably not needed for this source, but other sources should do so.
				if err := t.UpdateProcessingTime(s.id, execute.Now()); err != nil {
					return err
				}
			}
			return nil
		})
		if err != nil {
			return err
		}

		// mark is the window's stop time: everything at or before it has
		// now been delivered downstream.
		for _, t := range s.ts {
			if err := t.UpdateWatermark(s.id, mark); err != nil {
				return err
			}
		}
	}
	return nil
}
|
|
||
| func (s *source) next(ctx context.Context) (flux.TableIterator, execute.Time, bool) { | ||
| if s.overflow { | ||
| return nil, 0, false | ||
| } | ||
|
|
||
| start := s.currentTime - execute.Time(s.window.Period) | ||
| stop := s.currentTime | ||
| if stop > s.bounds.Stop { | ||
| return nil, 0, false | ||
| } | ||
|
|
||
| // Check if we will overflow, if so we are done after this pass | ||
| every := execute.Time(s.window.Every) | ||
| if every > 0 { | ||
| s.overflow = s.currentTime > math.MaxInt64-every | ||
| } else { | ||
| s.overflow = s.currentTime < math.MinInt64-every | ||
| } | ||
| s.currentTime = s.currentTime + every | ||
|
|
||
| bi, err := s.reader.Read( | ||
| ctx, | ||
| s.readSpec, | ||
| start, | ||
| stop, | ||
| s.alloc, | ||
| ) | ||
| if err != nil { | ||
| log.Println("E!", err) | ||
| return nil, 0, false | ||
| } | ||
| return bi, stop, true | ||
| } | ||
|
|
||
// GroupMode determines how series are grouped into tables during a read.
type GroupMode int

const (
	// GroupModeDefault specifies the default grouping mode, which is GroupModeAll.
	GroupModeDefault GroupMode = 0
	// GroupModeNone merges all series into a single group.
	// NOTE: iota is 1 on this spec, so the flag values below are
	// 2, 4, 8, 16 respectively — GroupModeDefault (0) stays distinct.
	GroupModeNone GroupMode = 1 << iota
	// GroupModeAll produces a separate table for each series.
	GroupModeAll
	// GroupModeBy produces a table for each unique value of the specified GroupKeys.
	GroupModeBy
	// GroupModeExcept produces a table for the unique values of all keys, except those specified by GroupKeys.
	GroupModeExcept
)
|
|
||
// ToGroupMode accepts the group mode from Flux and produces the appropriate storage group mode.
// flux.GroupModeNone maps to GroupModeDefault (documented above as
// equivalent to GroupModeAll). Unknown modes panic — callers are
// expected to have validated the mode earlier in planning.
func ToGroupMode(fluxMode flux.GroupMode) GroupMode {
	switch fluxMode {
	case flux.GroupModeNone:
		return GroupModeDefault
	case flux.GroupModeBy:
		return GroupModeBy
	case flux.GroupModeExcept:
		return GroupModeExcept
	default:
		panic(fmt.Sprint("unknown group mode: ", fluxMode))
	}
}
|
|
||
| type ReadSpec struct { | ||
| Database string | ||
| RetentionPolicy string | ||
|
|
||
| RAMLimit uint64 | ||
| Hosts []string | ||
| Predicate *semantic.FunctionExpression | ||
| PointsLimit int64 | ||
| SeriesLimit int64 | ||
| SeriesOffset int64 | ||
| Descending bool | ||
|
|
||
| AggregateMethod string | ||
|
|
||
| // OrderByTime indicates that series reads should produce all | ||
| // series for a time before producing any series for a larger time. | ||
| // By default this is false meaning all values of time are produced for a given series, | ||
| // before any values are produced from the next series. | ||
| OrderByTime bool | ||
| // GroupMode instructs | ||
| GroupMode GroupMode | ||
| // GroupKeys is the list of dimensions along which to group. | ||
| // | ||
| // When GroupMode is GroupModeBy, the results will be grouped by the specified keys. | ||
| // When GroupMode is GroupModeExcept, the results will be grouped by all keys, except those specified. | ||
| GroupKeys []string | ||
| } | ||
|
|
||
| type Reader interface { | ||
| Read(ctx context.Context, rs ReadSpec, start, stop execute.Time, alloc *memory.Allocator) (flux.TableIterator, error) | ||
| Close() | ||
| } |
| Original file line number | Diff line number | Diff line change |
|---|---|---|
| @@ -0,0 +1,199 @@ | ||
| package v1 | ||
|
|
||
| import ( | ||
| "context" | ||
| "errors" | ||
| "fmt" | ||
|
|
||
| "github.com/influxdata/flux" | ||
| "github.com/influxdata/flux/execute" | ||
| "github.com/influxdata/flux/memory" | ||
| "github.com/influxdata/flux/plan" | ||
| v1 "github.com/influxdata/flux/stdlib/influxdata/influxdb/v1" | ||
| "github.com/influxdata/flux/values" | ||
| "github.com/influxdata/influxdb/coordinator" | ||
| "github.com/influxdata/influxdb/flux/stdlib/influxdata/influxdb" | ||
| "github.com/influxdata/influxdb/services/meta" | ||
| "github.com/influxdata/influxql" | ||
| ) | ||
|
|
||
// DatabasesKind is the operation/procedure kind for v1.databases().
const DatabasesKind = v1.DatabasesKind

// DatabasesOpSpec is the (empty) operation spec: databases() takes no
// arguments.
type DatabasesOpSpec struct {
}
|
|
||
| func init() { | ||
| flux.ReplacePackageValue("influxdata/influxdb/v1", DatabasesKind, flux.FunctionValue(DatabasesKind, createDatabasesOpSpec, v1.DatabasesSignature)) | ||
| flux.RegisterOpSpec(DatabasesKind, newDatabasesOp) | ||
| plan.RegisterProcedureSpec(DatabasesKind, newDatabasesProcedure, DatabasesKind) | ||
| } | ||
|
|
||
| func createDatabasesOpSpec(args flux.Arguments, a *flux.Administration) (flux.OperationSpec, error) { | ||
| spec := new(DatabasesOpSpec) | ||
| return spec, nil | ||
| } | ||
|
|
||
| func newDatabasesOp() flux.OperationSpec { | ||
| return new(DatabasesOpSpec) | ||
| } | ||
|
|
||
| func (s *DatabasesOpSpec) Kind() flux.OperationKind { | ||
| return DatabasesKind | ||
| } | ||
|
|
||
// DatabasesProcedureSpec is the (stateless) planner procedure for
// databases(); DefaultCost supplies the standard cost estimate.
type DatabasesProcedureSpec struct {
	plan.DefaultCost
}
|
|
||
| func newDatabasesProcedure(qs flux.OperationSpec, pa plan.Administration) (plan.ProcedureSpec, error) { | ||
| _, ok := qs.(*DatabasesOpSpec) | ||
| if !ok { | ||
| return nil, fmt.Errorf("invalid spec type %T", qs) | ||
| } | ||
|
|
||
| return &DatabasesProcedureSpec{}, nil | ||
| } | ||
|
|
||
| func (s *DatabasesProcedureSpec) Kind() plan.ProcedureKind { | ||
| return DatabasesKind | ||
| } | ||
|
|
||
| func (s *DatabasesProcedureSpec) Copy() plan.ProcedureSpec { | ||
| ns := new(DatabasesProcedureSpec) | ||
| return ns | ||
| } | ||
|
|
||
| func init() { | ||
| execute.RegisterSource(DatabasesKind, createDatabasesSource) | ||
| } | ||
|
|
||
| type DatabasesDecoder struct { | ||
| deps *DatabaseDependencies | ||
| databases []meta.DatabaseInfo | ||
| user meta.User | ||
| alloc *memory.Allocator | ||
| ctx context.Context | ||
| } | ||
|
|
||
| func (bd *DatabasesDecoder) Connect() error { | ||
| return nil | ||
| } | ||
|
|
||
// Fetch loads the full database list from the meta client in a single
// batch; returning false tells the framework there is no more data.
func (bd *DatabasesDecoder) Fetch() (bool, error) {
	bd.databases = bd.deps.MetaClient.Databases()
	return false, nil
}
|
|
||
| func (bd *DatabasesDecoder) Decode() (flux.Table, error) { | ||
| kb := execute.NewGroupKeyBuilder(nil) | ||
| kb.AddKeyValue("organizationID", values.NewString("")) | ||
| gk, err := kb.Build() | ||
| if err != nil { | ||
| return nil, err | ||
| } | ||
|
|
||
| b := execute.NewColListTableBuilder(gk, bd.alloc) | ||
|
|
||
| if _, err := b.AddCol(flux.ColMeta{ | ||
| Label: "organizationID", | ||
| Type: flux.TString, | ||
| }); err != nil { | ||
| return nil, err | ||
| } | ||
| if _, err := b.AddCol(flux.ColMeta{ | ||
| Label: "databaseName", | ||
| Type: flux.TString, | ||
| }); err != nil { | ||
| return nil, err | ||
| } | ||
| if _, err := b.AddCol(flux.ColMeta{ | ||
| Label: "retentionPolicy", | ||
| Type: flux.TString, | ||
| }); err != nil { | ||
| return nil, err | ||
| } | ||
| if _, err := b.AddCol(flux.ColMeta{ | ||
| Label: "retentionPeriod", | ||
| Type: flux.TInt, | ||
| }); err != nil { | ||
| return nil, err | ||
| } | ||
| if _, err := b.AddCol(flux.ColMeta{ | ||
| Label: "default", | ||
| Type: flux.TBool, | ||
| }); err != nil { | ||
| return nil, err | ||
| } | ||
| if _, err := b.AddCol(flux.ColMeta{ | ||
| Label: "bucketId", | ||
| Type: flux.TString, | ||
| }); err != nil { | ||
| return nil, err | ||
| } | ||
|
|
||
| var hasAccess func(db string) bool | ||
| if bd.user == nil { | ||
| hasAccess = func(db string) bool { | ||
| return true | ||
| } | ||
| } else { | ||
| hasAccess = func(db string) bool { | ||
| return bd.deps.Authorizer.AuthorizeDatabase(bd.user, influxql.ReadPrivilege, db) == nil || | ||
| bd.deps.Authorizer.AuthorizeDatabase(bd.user, influxql.WritePrivilege, db) == nil | ||
| } | ||
| } | ||
|
|
||
| for _, db := range bd.databases { | ||
| if hasAccess(db.Name) { | ||
| for _, rp := range db.RetentionPolicies { | ||
| _ = b.AppendString(0, "") | ||
| _ = b.AppendString(1, db.Name) | ||
| _ = b.AppendString(2, rp.Name) | ||
| _ = b.AppendInt(3, rp.Duration.Nanoseconds()) | ||
| _ = b.AppendBool(4, db.DefaultRetentionPolicy == rp.Name) | ||
| _ = b.AppendString(5, "") | ||
| } | ||
| } | ||
| } | ||
|
|
||
| return b.Table() | ||
| } | ||
|
|
||
| func (bd *DatabasesDecoder) Close() error { | ||
| return nil | ||
| } | ||
|
|
||
| func createDatabasesSource(prSpec plan.ProcedureSpec, dsid execute.DatasetID, a execute.Administration) (execute.Source, error) { | ||
| _, ok := prSpec.(*DatabasesProcedureSpec) | ||
| if !ok { | ||
| return nil, fmt.Errorf("invalid spec type %T", prSpec) | ||
| } | ||
|
|
||
| deps := a.Dependencies()[DatabasesKind].(DatabaseDependencies) | ||
| var user meta.User | ||
| if deps.AuthEnabled { | ||
| user = meta.UserFromContext(a.Context()) | ||
| if user == nil { | ||
| return nil, errors.New("createDatabasesSource: no user") | ||
| } | ||
| } | ||
| bd := &DatabasesDecoder{deps: &deps, alloc: a.Allocator(), ctx: a.Context(), user: user} | ||
| return execute.CreateSourceFromDecoder(bd, dsid, a) | ||
| } | ||
|
|
||
// DatabaseDependencies bundles everything the databases() source needs
// at execution time. Authorizer may be nil only when AuthEnabled is
// false (see InjectDatabaseDependencies).
type DatabaseDependencies struct {
	MetaClient  coordinator.MetaClient
	Authorizer  influxdb.Authorizer
	AuthEnabled bool
}
|
|
||
| func InjectDatabaseDependencies(depsMap execute.Dependencies, deps DatabaseDependencies) error { | ||
| if deps.MetaClient == nil { | ||
| return errors.New("missing meta client dependency") | ||
| } | ||
| if deps.AuthEnabled && deps.Authorizer == nil { | ||
| return errors.New("missing authorizer with auth enabled") | ||
| } | ||
| depsMap[DatabasesKind] = deps | ||
| return nil | ||
| } |
| Original file line number | Diff line number | Diff line change |
|---|---|---|
| @@ -0,0 +1,7 @@ | ||
| package stdlib | ||
|
|
||
| // Import all stdlib packages | ||
| import ( | ||
| _ "github.com/influxdata/influxdb/flux/stdlib/influxdata/influxdb" | ||
| _ "github.com/influxdata/influxdb/flux/stdlib/influxdata/influxdb/v1" | ||
| ) |
| Original file line number | Diff line number | Diff line change |
|---|---|---|
| @@ -0,0 +1,187 @@ | ||
| package mock | ||
|
|
||
| import ( | ||
| "github.com/influxdata/influxdb/models" | ||
| "github.com/influxdata/influxdb/storage/reads" | ||
| "github.com/influxdata/influxdb/storage/reads/datatypes" | ||
| "github.com/influxdata/influxdb/tsdb/cursors" | ||
| "google.golang.org/grpc/metadata" | ||
| ) | ||
|
|
||
| type ResponseStream struct { | ||
| SendFunc func(*datatypes.ReadResponse) error | ||
| SetTrailerFunc func(metadata.MD) | ||
| } | ||
|
|
||
| func NewResponseStream() *ResponseStream { | ||
| return &ResponseStream{ | ||
| SendFunc: func(*datatypes.ReadResponse) error { return nil }, | ||
| SetTrailerFunc: func(mds metadata.MD) {}, | ||
| } | ||
| } | ||
|
|
||
| func (s *ResponseStream) Send(r *datatypes.ReadResponse) error { | ||
| return s.SendFunc(r) | ||
| } | ||
|
|
||
| func (s *ResponseStream) SetTrailer(m metadata.MD) { | ||
| s.SetTrailerFunc(m) | ||
| } | ||
|
|
||
| type ResultSet struct { | ||
| NextFunc func() bool | ||
| CursorFunc func() cursors.Cursor | ||
| TagsFunc func() models.Tags | ||
| CloseFunc func() | ||
| ErrFunc func() error | ||
| StatsFunc func() cursors.CursorStats | ||
| } | ||
|
|
||
| func NewResultSet() *ResultSet { | ||
| return &ResultSet{ | ||
| NextFunc: func() bool { return false }, | ||
| CursorFunc: func() cursors.Cursor { return nil }, | ||
| TagsFunc: func() models.Tags { return nil }, | ||
| CloseFunc: func() {}, | ||
| ErrFunc: func() error { return nil }, | ||
| StatsFunc: func() cursors.CursorStats { return cursors.CursorStats{} }, | ||
| } | ||
| } | ||
|
|
||
| func (rs *ResultSet) Next() bool { | ||
| return rs.NextFunc() | ||
| } | ||
|
|
||
| func (rs *ResultSet) Cursor() cursors.Cursor { | ||
| return rs.CursorFunc() | ||
| } | ||
|
|
||
| func (rs *ResultSet) Tags() models.Tags { | ||
| return rs.TagsFunc() | ||
| } | ||
|
|
||
| func (rs *ResultSet) Close() { | ||
| rs.CloseFunc() | ||
| } | ||
|
|
||
| func (rs *ResultSet) Err() error { | ||
| return rs.ErrFunc() | ||
| } | ||
|
|
||
| func (rs *ResultSet) Stats() cursors.CursorStats { | ||
| return rs.StatsFunc() | ||
| } | ||
|
|
||
| type GroupResultSet struct { | ||
| NextFunc func() reads.GroupCursor | ||
| CloseFunc func() | ||
| ErrFunc func() error | ||
| } | ||
|
|
||
| func NewGroupResultSet() *GroupResultSet { | ||
| return &GroupResultSet{ | ||
| NextFunc: func() reads.GroupCursor { return nil }, | ||
| CloseFunc: func() {}, | ||
| ErrFunc: func() error { return nil }, | ||
| } | ||
| } | ||
|
|
||
| func (rs *GroupResultSet) Next() reads.GroupCursor { | ||
| return rs.NextFunc() | ||
| } | ||
|
|
||
| func (rs *GroupResultSet) Close() { | ||
| rs.CloseFunc() | ||
| } | ||
|
|
||
| func (rs *GroupResultSet) Err() error { | ||
| return rs.ErrFunc() | ||
| } | ||
|
|
||
| type IntegerArrayCursor struct { | ||
| CloseFunc func() | ||
| Errfunc func() error | ||
| StatsFunc func() cursors.CursorStats | ||
| NextFunc func() *cursors.IntegerArray | ||
| } | ||
|
|
||
| func NewIntegerArrayCursor() *IntegerArrayCursor { | ||
| return &IntegerArrayCursor{ | ||
| CloseFunc: func() {}, | ||
| Errfunc: func() error { return nil }, | ||
| StatsFunc: func() cursors.CursorStats { return cursors.CursorStats{} }, | ||
| NextFunc: func() *cursors.IntegerArray { return &cursors.IntegerArray{} }, | ||
| } | ||
| } | ||
|
|
||
| func (c *IntegerArrayCursor) Close() { | ||
| c.CloseFunc() | ||
| } | ||
|
|
||
| func (c *IntegerArrayCursor) Err() error { | ||
| return c.Errfunc() | ||
| } | ||
|
|
||
| func (c *IntegerArrayCursor) Stats() cursors.CursorStats { | ||
| return c.StatsFunc() | ||
| } | ||
|
|
||
| func (c *IntegerArrayCursor) Next() *cursors.IntegerArray { | ||
| return c.NextFunc() | ||
| } | ||
|
|
||
| type GroupCursor struct { | ||
| NextFunc func() bool | ||
| CursorFunc func() cursors.Cursor | ||
| TagsFunc func() models.Tags | ||
| KeysFunc func() [][]byte | ||
| PartitionKeyValsFunc func() [][]byte | ||
| CloseFunc func() | ||
| ErrFunc func() error | ||
| StatsFunc func() cursors.CursorStats | ||
| } | ||
|
|
||
| func NewGroupCursor() *GroupCursor { | ||
| return &GroupCursor{ | ||
| NextFunc: func() bool { return false }, | ||
| CursorFunc: func() cursors.Cursor { return nil }, | ||
| TagsFunc: func() models.Tags { return nil }, | ||
| KeysFunc: func() [][]byte { return nil }, | ||
| PartitionKeyValsFunc: func() [][]byte { return nil }, | ||
| CloseFunc: func() {}, | ||
| ErrFunc: func() error { return nil }, | ||
| StatsFunc: func() cursors.CursorStats { return cursors.CursorStats{} }, | ||
| } | ||
| } | ||
|
|
||
| func (c *GroupCursor) Next() bool { | ||
| return c.NextFunc() | ||
| } | ||
|
|
||
| func (c *GroupCursor) Cursor() cursors.Cursor { | ||
| return c.CursorFunc() | ||
| } | ||
|
|
||
| func (c *GroupCursor) Tags() models.Tags { | ||
| return c.TagsFunc() | ||
| } | ||
|
|
||
| func (c *GroupCursor) Keys() [][]byte { | ||
| return c.KeysFunc() | ||
| } | ||
|
|
||
| func (c *GroupCursor) PartitionKeyVals() [][]byte { | ||
| return c.PartitionKeyValsFunc() | ||
| } | ||
|
|
||
| func (c *GroupCursor) Close() { | ||
| c.CloseFunc() | ||
| } | ||
|
|
||
| func (c *GroupCursor) Err() error { | ||
| return c.ErrFunc() | ||
| } | ||
|
|
||
| func (c *GroupCursor) Stats() cursors.CursorStats { | ||
| return c.StatsFunc() | ||
| } |
| Original file line number | Diff line number | Diff line change |
|---|---|---|
| @@ -0,0 +1,163 @@ | ||
| package reads | ||
|
|
||
| import ( | ||
| "context" | ||
| "fmt" | ||
|
|
||
| "github.com/influxdata/influxdb/storage/reads/datatypes" | ||
| "github.com/influxdata/influxdb/tsdb/cursors" | ||
| ) | ||
|
|
||
| type singleValue struct { | ||
| v interface{} | ||
| } | ||
|
|
||
| func (v *singleValue) Value(key string) (interface{}, bool) { | ||
| return v.v, true | ||
| } | ||
|
|
||
// newAggregateArrayCursor wraps cursor with the requested aggregate.
// A nil input cursor yields nil. Only sum and count are supported;
// any other aggregate type panics (see TODO below).
func newAggregateArrayCursor(ctx context.Context, agg *datatypes.Aggregate, cursor cursors.Cursor) cursors.Cursor {
	if cursor == nil {
		return nil
	}

	switch agg.Type {
	case datatypes.AggregateTypeSum:
		return newSumArrayCursor(cursor)
	case datatypes.AggregateTypeCount:
		return newCountArrayCursor(cursor)
	default:
		// TODO(sgc): should be validated higher up
		panic("invalid aggregate")
	}
}
|
|
||
// newSumArrayCursor wraps cur with a summing cursor for numeric value
// types. String and boolean cursors have no sum, so the default branch
// returns nil rather than panicking.
func newSumArrayCursor(cur cursors.Cursor) cursors.Cursor {
	switch cur := cur.(type) {
	case cursors.FloatArrayCursor:
		return newFloatArraySumCursor(cur)
	case cursors.IntegerArrayCursor:
		return newIntegerArraySumCursor(cur)
	case cursors.UnsignedArrayCursor:
		return newUnsignedArraySumCursor(cur)
	default:
		// TODO(sgc): propagate an error instead?
		return nil
	}
}
|
|
||
// newCountArrayCursor wraps cur with a counting cursor. Count is
// defined for every value type, so an unknown cursor type is a
// programmer error and panics.
func newCountArrayCursor(cur cursors.Cursor) cursors.Cursor {
	switch cur := cur.(type) {
	case cursors.FloatArrayCursor:
		return &integerFloatCountArrayCursor{FloatArrayCursor: cur}
	case cursors.IntegerArrayCursor:
		return &integerIntegerCountArrayCursor{IntegerArrayCursor: cur}
	case cursors.UnsignedArrayCursor:
		return &integerUnsignedCountArrayCursor{UnsignedArrayCursor: cur}
	case cursors.StringArrayCursor:
		return &integerStringCountArrayCursor{StringArrayCursor: cur}
	case cursors.BooleanArrayCursor:
		return &integerBooleanCountArrayCursor{BooleanArrayCursor: cur}
	default:
		panic(fmt.Sprintf("unreachable: %T", cur))
	}
}
|
|
||
| type cursorContext struct { | ||
| ctx context.Context | ||
| req *cursors.CursorRequest | ||
| itrs cursors.CursorIterators | ||
| limit int64 | ||
| count int64 | ||
| err error | ||
| } | ||
|
|
||
// multiShardArrayCursors owns one reusable cursor per value type; each
// createCursor call re-initializes and hands out the matching field,
// so the cursors (and req) are shared, not per-call, state.
type multiShardArrayCursors struct {
	ctx   context.Context
	limit int64
	req   cursors.CursorRequest

	cursors struct {
		i integerMultiShardArrayCursor
		f floatMultiShardArrayCursor
		u unsignedMultiShardArrayCursor
		b booleanMultiShardArrayCursor
		s stringMultiShardArrayCursor
	}
}
|
|
||
// newMultiShardArrayCursors builds the reusable cursor set for reading
// the time range [start, end] across shards, seeding every per-type
// cursor with the same shared request/limit context.
//
// NOTE(review): a negative limit is clamped to 1, not treated as
// "unlimited" — confirm this is the intended sentinel convention.
func newMultiShardArrayCursors(ctx context.Context, start, end int64, asc bool, limit int64) *multiShardArrayCursors {
	if limit < 0 {
		limit = 1
	}

	m := &multiShardArrayCursors{
		ctx:   ctx,
		limit: limit,
		req: cursors.CursorRequest{
			Ascending: asc,
			StartTime: start,
			EndTime:   end,
		},
	}

	cc := cursorContext{
		ctx:   ctx,
		limit: limit,
		req:   &m.req,
	}

	m.cursors.i.cursorContext = cc
	m.cursors.f.cursorContext = cc
	m.cursors.u.cursorContext = cc
	m.cursors.b.cursorContext = cc
	m.cursors.s.cursorContext = cc

	return m
}
|
|
||
// createCursor finds the first shard in row.Query that yields a cursor
// for this series and wraps it in the matching typed multi-shard
// cursor, which continues through the remaining shards on exhaustion.
// It returns nil when no shard produces a cursor.
//
// NOTE(review): the returned cursor aliases state embedded in m
// (m.cursors.* and m.req), so at most one cursor per value type may be
// live at a time and calls must not run concurrently — confirm callers
// respect this.
func (m *multiShardArrayCursors) createCursor(row SeriesRow) cursors.Cursor {
	m.req.Name = row.Name
	m.req.Tags = row.SeriesTags
	m.req.Field = row.Field

	var cond expression
	if row.ValueCond != nil {
		cond = &astExpr{row.ValueCond}
	}

	// Pop shards off row.Query until one returns a cursor; Next errors
	// are ignored here and the shard is simply skipped.
	var shard cursors.CursorIterator
	var cur cursors.Cursor
	for cur == nil && len(row.Query) > 0 {
		shard, row.Query = row.Query[0], row.Query[1:]
		cur, _ = shard.Next(m.ctx, &m.req)
	}

	if cur == nil {
		return nil
	}

	switch c := cur.(type) {
	case cursors.IntegerArrayCursor:
		m.cursors.i.reset(c, row.Query, cond)
		return &m.cursors.i
	case cursors.FloatArrayCursor:
		m.cursors.f.reset(c, row.Query, cond)
		return &m.cursors.f
	case cursors.UnsignedArrayCursor:
		m.cursors.u.reset(c, row.Query, cond)
		return &m.cursors.u
	case cursors.StringArrayCursor:
		m.cursors.s.reset(c, row.Query, cond)
		return &m.cursors.s
	case cursors.BooleanArrayCursor:
		m.cursors.b.reset(c, row.Query, cond)
		return &m.cursors.b
	default:
		panic(fmt.Sprintf("unreachable: %T", cur))
	}
}
|
|
||
// newAggregateCursor delegates to the package-level
// newAggregateArrayCursor; see that function for supported aggregates.
func (m *multiShardArrayCursors) newAggregateCursor(ctx context.Context, agg *datatypes.Aggregate, cursor cursors.Cursor) cursors.Cursor {
	return newAggregateArrayCursor(ctx, agg, cursor)
}
| Original file line number | Diff line number | Diff line change |
|---|---|---|
| @@ -0,0 +1,30 @@ | ||
# List any generated files here
TARGETS = predicate.pb.go \
	storage_common.pb.go

# List any source files used to generate the targets here
SOURCES = gen.go \
	predicate.proto \
	storage_common.proto

# List any directories that have their own Makefile here
SUBDIRS =

# Default target
all: $(SUBDIRS) $(TARGETS)

# Recurse into subdirs for same make goal
$(SUBDIRS):
	$(MAKE) -C $@ $(MAKECMDGOALS)

# Clean all targets recursively
clean: $(SUBDIRS)
	rm -f $(TARGETS)

# Define go generate if not already defined.
# BUGFIX: the original used := which unconditionally overwrote any
# GO_GENERATE supplied by the environment or command line, contradicting
# this comment; ?= assigns only when the variable is unset.
GO_GENERATE ?= go generate

$(TARGETS): $(SOURCES)
	$(GO_GENERATE) -x

.PHONY: all clean $(SUBDIRS)
| Original file line number | Diff line number | Diff line change |
|---|---|---|
| @@ -0,0 +1,55 @@ | ||
| package datatypes | ||
|
|
||
import (
	"sort"
	"strings"

	"github.com/gogo/protobuf/proto"
)
|
|
||
| type HintFlags uint32 | ||
|
|
||
| func (h HintFlags) NoPoints() bool { | ||
| return uint32(h)&uint32(HintNoPoints) != 0 | ||
| } | ||
|
|
||
| func (h *HintFlags) SetNoPoints() { | ||
| *h |= HintFlags(HintNoPoints) | ||
| } | ||
|
|
||
| func (h HintFlags) NoSeries() bool { | ||
| return uint32(h)&uint32(HintNoSeries) != 0 | ||
| } | ||
|
|
||
| func (h *HintFlags) SetNoSeries() { | ||
| *h |= HintFlags(HintNoSeries) | ||
| } | ||
|
|
||
| func (h HintFlags) HintSchemaAllTime() bool { | ||
| return uint32(h)&uint32(HintSchemaAllTime) != 0 | ||
| } | ||
|
|
||
| func (h *HintFlags) SetHintSchemaAllTime() { | ||
| *h |= HintFlags(HintSchemaAllTime) | ||
| } | ||
|
|
||
| func (h HintFlags) String() string { | ||
| f := uint32(h) | ||
|
|
||
| var s []string | ||
| enums := proto.EnumValueMap("influxdata.platform.storage.ReadRequest_HintFlags") | ||
| if h == 0 { | ||
| return "HINT_NONE" | ||
| } | ||
|
|
||
| for k, v := range enums { | ||
| if v == 0 { | ||
| continue | ||
| } | ||
| v := uint32(v) | ||
| if f&v == v { | ||
| s = append(s, k) | ||
| } | ||
| } | ||
|
|
||
| return strings.Join(s, ",") | ||
| } |