Skip to content
This repository was archived by the owner on Sep 28, 2022. It is now read-only.
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
5 changes: 4 additions & 1 deletion CHANGELOG.md
Original file line number Diff line number Diff line change
@@ -1,9 +1,12 @@
## Change Log

* **master**
* Added `field.rows` and `index.group_by` calls.

* **v1.2.0** (2018-12-21)
* **Compatible with Pilosa 1.2**
* Added `track_existence` index option.
* Added `not_` index method to support `Not` queries. The corresponding index must be created with `track_existence=True`.
* Added mutex and bool fields.
* Added `field.clear_row`, `field.store` and `index.options` calls.
* Added support for roaring importing `RowIDColumnID` with timestamp data.
Expand Down
52 changes: 27 additions & 25 deletions docs/data-model-queries.md
Original file line number Diff line number Diff line change
Expand Up @@ -103,31 +103,33 @@ Please check [Pilosa documentation](https://www.pilosa.com/docs) for PQL details

Index:

* `union(self, *rows)`
* `intersect(self, *rows)`
* `difference(self, *rows)`
* `count(self, row)`
* `set_column_attrs(self, column_id, attrs)`
* `xor(self, *rows)`
* `not_(self, row)`
* `options(self, row_query, column_attrs=False, exclude_columns=False, exclude_row_attrs=False, shards=[])`
* `union(*rows)`
* `intersect(*rows)`
* `difference(*rows)`
* `count(row)`
* `set_column_attrs(column_id, attrs)`
* `xor(*rows)`
* `not_(row)`
* `options(row_query, column_attrs=False, exclude_columns=False, exclude_row_attrs=False, shards=[])`
* `group_by(*rows_queries, limit=0, filter=None)`

Field:

* `row(self, row_id)`
* `set(self, row_id, column_id, timestamp=None)`
* `clear(self, row_id, column_id)`
* `topn(self, n, row=None, field="", *values)`
* `range(self, row_id, start, end)`
* `set_row_attrs(self, row_id, attrs)`
* `lt(self, n)`
* `lte(self, n)`
* `gt(self, n)`
* `gte(self, n)`
* `between(self, a, b)`
* `sum(self, row=None)`
* `min(self, row=None)`
* `max(self, row=None)`
* `setvalue(self, column_id, value)`
* `store(self, row_query, row)`
* `clear_row(self, row)`
* `row(row_id)`
* `set(row_id, column_id, timestamp=None)`
* `clear(row_id, column_id)`
* `topn(n, row=None, field="", *values)`
* `range(row_id, start, end)`
* `set_row_attrs(row_id, attrs)`
* `lt(n)`
* `lte(n)`
* `gt(n)`
* `gte(n)`
* `between(a, b)`
* `sum(row=None)`
* `min(row=None)`
* `max(row=None)`
* `setvalue(column_id, value)`
* `store(row_query, row)`
* `clear_row(row)`
* `rows(prev_row=None, limit=0, column=None)`
33 changes: 33 additions & 0 deletions integration_tests/test_client_it.py
Original file line number Diff line number Diff line change
Expand Up @@ -44,6 +44,7 @@
from pilosa.client import Client, URI, Cluster, PilosaServerError
from pilosa.exceptions import PilosaError
from pilosa.orm import Index, TimeQuantum, Schema, CacheType
from pilosa.response import GroupCount, FieldRow
from pilosa.imports import csv_column_reader, csv_field_value_reader, \
csv_column_id_value, csv_column_key_value, csv_row_key_column_id

Expand Down Expand Up @@ -579,6 +580,38 @@ def test_range_field(self):
self.assertEquals(1, len(response.results))
self.assertEquals(10, response.result.row.columns[0])

def test_rows(self):
    # Verify that field.rows() reports every row id that has at least one
    # column set, in ascending order.
    client = self.get_client()
    field = self.index.field("rowsfield")
    client.ensure_field(field)
    batch = self.index.batch_query(
        field.set(1, 100),
        field.set(1, 200),
        field.set(2, 200),
    )
    client.query(batch)
    response = client.query(field.rows())
    self.assertEqual([1, 2], response.result.row_identifiers.ids)

def test_group_by(self):
    # Verify that index.group_by(field.rows()) buckets columns by row and
    # returns one GroupCount per row with the matching column count.
    client = self.get_client()
    field = self.index.field("groupbyfield")
    client.ensure_field(field)
    batch = self.index.batch_query(
        field.set(1, 100),
        field.set(1, 200),
        field.set(2, 200),
    )
    client.query(batch)
    response = client.query(self.index.group_by(field.rows()))
    expected = [
        GroupCount([FieldRow("groupbyfield", 1)], 2),
        GroupCount([FieldRow("groupbyfield", 2)], 1),
    ]
    self.assertEqual(expected, response.result.group_counts)


def test_exclude_attrs_columns(self):
client = self.get_client()
client.query(self.col_index.batch_query(
Expand Down
3 changes: 1 addition & 2 deletions pilosa/imports.py
Original file line number Diff line number Diff line change
Expand Up @@ -80,8 +80,7 @@ def __init__(self, column_id=0, column_key="", value=0):
self.value = value

def __hash__(self):
    # The diff rendering left both the old string-format hash and the new
    # tuple hash in place; only the tuple version belongs here. Hashing the
    # identifying triple directly avoids building an intermediate string and
    # cannot collide on formatting artifacts (e.g. values containing ':').
    # Must stay consistent with __eq__, which compares these same attributes.
    return hash((self.column_id, self.column_key, self.value))

def __eq__(self, other):
if id(self) == id(other):
Expand Down
25 changes: 16 additions & 9 deletions pilosa/internal/public.proto
Original file line number Diff line number Diff line change
@@ -1,5 +1,7 @@
syntax = "proto3";

package internal;

message Row {
repeated uint64 Columns = 1;
repeated string Keys = 3;
Expand All @@ -17,12 +19,13 @@ message Pair {
uint64 Count = 2;
}

// FieldRow identifies one row of one field. Used to label the groups
// returned in GroupCount results. Either RowID or RowKey is set,
// depending on whether the field is keyed — confirm against the server.
message FieldRow {
  // Name of the field the row belongs to.
  string Field = 1;
  // Numeric row identifier (unkeyed fields).
  uint64 RowID = 2;
  // String row key (keyed fields).
  string RowKey = 3;
}

// GroupCount is a single bucket of a GroupBy result: the combination of
// field rows that defines the group, and the number of columns counted
// for that combination.
message GroupCount {
  // The field/row pairs identifying this group.
  repeated FieldRow Group = 1;
  // Number of columns in the group.
  uint64 Count = 2;
}
Expand All @@ -32,12 +35,6 @@ message ValCount {
int64 Count = 2;
}

// Bit is a single (row, column) pair with an optional timestamp.
// NOTE(review): this message appears to be removed by the surrounding
// change — confirm there are no remaining users before deleting.
message Bit {
uint64 RowID = 1;
uint64 ColumnID = 2;
// Unix timestamp; units (seconds vs. nanoseconds) not evident here — confirm.
int64 Timestamp = 3;
}

message ColumnAttrSet {
uint64 ID = 1;
string Key = 3;
Expand Down Expand Up @@ -78,7 +75,7 @@ message QueryResult {
uint64 N = 2;
repeated Pair Pairs = 3;
bool Changed = 4;
ValCount ValCount = 5;
repeated uint64 RowIDs = 7;
repeated GroupCount GroupCounts = 8;
RowIdentifiers RowIdentifiers = 9;
Expand All @@ -104,6 +101,16 @@ message ImportValueRequest {
repeated int64 Values = 6;
}

// TranslateKeysRequest asks the server to translate string keys into
// numeric IDs for the given index (and optionally field).
message TranslateKeysRequest {
// Name of the index the keys belong to.
string Index = 1;
// Name of the field; presumably empty when translating index-level
// column keys — confirm against the server implementation.
string Field = 2;
// The string keys to translate.
repeated string Keys = 3;
}

// TranslateKeysResponse carries the numeric IDs for a TranslateKeysRequest,
// presumably in the same order as the request's Keys — confirm server-side.
message TranslateKeysResponse {
// Field number 3 (not 1) is intentional-looking and must be preserved:
// renumbering a published field breaks wire compatibility.
repeated uint64 IDs = 3;
}

message ImportRoaringRequestView {
string Name = 1;
bytes Data = 2;
Expand Down
Loading