18 changes: 7 additions & 11 deletions arango/aql.py
@@ -183,18 +183,15 @@ def execute(self,
         :param profile: Return additional profiling details in the cursor,
             unless the query cache is used.
         :type profile: bool
-        :param max_transaction_size: Transaction size limit in bytes. Applies
-            only to RocksDB storage engine.
+        :param max_transaction_size: Transaction size limit in bytes.
         :type max_transaction_size: int
         :param max_warning_count: Max number of warnings returned.
         :type max_warning_count: int
         :param intermediate_commit_count: Max number of operations after
-            which an intermediate commit is performed automatically. Applies
-            only to RocksDB storage engine.
+            which an intermediate commit is performed automatically.
         :type intermediate_commit_count: int
         :param intermediate_commit_size: Max size of operations in bytes after
-            which an intermediate commit is performed automatically. Applies
-            only to RocksDB storage engine.
+            which an intermediate commit is performed automatically.
         :type intermediate_commit_size: int
         :param satellite_sync_wait: Number of seconds in which the server must
             synchronize the satellite collections involved in the query. When
@@ -214,11 +211,10 @@ def execute(self,
             entirety. Results are either returned right away (if the result set
             is small enough), or stored server-side and accessible via cursors
             (while respecting the ttl). You should use this parameter only for
-            short-running queries or without exclusive locks (write-locks on
-            MMFiles). Note: parameters **cache**, **count** and **full_count**
-            do not work for streaming queries. Query statistics, warnings and
-            profiling data are made available only after the query is finished.
-            Default value is False.
+            short-running queries or without exclusive locks. Note: parameters
+            **cache**, **count** and **full_count** do not work for streaming
+            queries. Query statistics, warnings and profiling data are made
+            available only after the query is finished. Default value is False.
         :type stream: bool
         :param skip_inaccessible_cols: If set to True, collections without user
             access are skipped, and query executes normally instead of raising
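Note: for context, here is a minimal sketch of an AQL call after this change. The connection details, database name, and collection are hypothetical; the point is that the transaction limits now apply unconditionally, since RocksDB is the only storage engine left.

    from arango import ArangoClient

    # Hypothetical connection details, for illustration only.
    client = ArangoClient(hosts='http://localhost:8529')
    db = client.db('test', username='root', password='passwd')

    # These limits used to be documented as RocksDB-only.
    cursor = db.aql.execute(
        'FOR s IN students RETURN s',
        max_transaction_size=100000,
        intermediate_commit_count=1000,
        stream=True,  # cache, count and full_count are ignored when streaming
    )
    for doc in cursor:
        print(doc)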
38 changes: 1 addition & 37 deletions arango/collection.py
@@ -15,7 +15,6 @@
     CollectionResponsibleShardError,
     CollectionRenameError,
     CollectionRevisionError,
-    CollectionRotateJournalError,
     CollectionStatisticsError,
     CollectionTruncateError,
     CollectionUnloadError,
@@ -307,22 +306,18 @@ def response_handler(resp):

         return self._execute(request, response_handler)

-    def configure(self, sync=None, journal_size=None):
+    def configure(self, sync=None):
         """Configure collection properties.

         :param sync: Block until operations are synchronized to disk.
         :type sync: bool
-        :param journal_size: Journal size in bytes.
-        :type journal_size: int
         :return: New collection properties.
         :rtype: dict
         :raise arango.exceptions.CollectionConfigureError: If operation fails.
         """
         data = {}
         if sync is not None:
             data['waitForSync'] = sync
-        if journal_size is not None:
-            data['journalSize'] = journal_size

         request = Request(
             method='put',
@@ -355,18 +350,6 @@ def response_handler(resp):
                 raise CollectionStatisticsError(resp, request)

             stats = resp.body.get('figures', resp.body)
-            for f in ['compactors', 'datafiles', 'journals']:
-                if f in stats and 'fileSize' in stats[f]:  # pragma: no cover
-                    stats[f]['file_size'] = stats[f].pop('fileSize')
-            if 'compactionStatus' in stats:  # pragma: no cover
-                status = stats.pop('compactionStatus')
-                if 'bytesRead' in status:
-                    status['bytes_read'] = status.pop('bytesRead')
-                if 'bytesWritten' in status:
-                    status['bytes_written'] = status.pop('bytesWritten')
-                if 'filesCombined' in status:
-                    status['files_combined'] = status.pop('filesCombined')
-                stats['compaction_status'] = status
             if 'documentReferences' in stats:  # pragma: no cover
                 stats['document_refs'] = stats.pop('documentReferences')
             if 'lastTick' in stats:  # pragma: no cover
@@ -470,25 +453,6 @@ def response_handler(resp):

         return self._execute(request, response_handler)

-    def rotate(self):
-        """Rotate the collection journal.
-
-        :return: True if collection journal was rotated successfully.
-        :rtype: bool
-        :raise arango.exceptions.CollectionRotateJournalError: If rotate fails.
-        """
-        request = Request(
-            method='put',
-            endpoint='/_api/collection/{}/rotate'.format(self.name),
-        )
-
-        def response_handler(resp):
-            if not resp.is_success:
-                raise CollectionRotateJournalError(resp, request)
-            return True  # pragma: no cover
-
-        return self._execute(request, response_handler)
-
     def truncate(self):
         """Delete all documents in the collection.

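Note: a quick sketch of the surviving configure() call, assuming a hypothetical 'students' collection and the db handle from the earlier sketch.

    students = db.collection('students')
    new_properties = students.configure(sync=True)  # journal_size is gone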
43 changes: 5 additions & 38 deletions arango/database.py
@@ -238,20 +238,17 @@ def execute_transaction(self,
             ArangoDB server waits indefinitely. If not set, system default
             value is used.
         :type timeout: int
-        :param max_size: Max transaction size limit in bytes. Applies only
-            to RocksDB storage engine.
+        :param max_size: Max transaction size limit in bytes.
         :type max_size: int
         :param allow_implicit: If set to True, undeclared read collections are
             loaded lazily. If set to False, transaction fails on any undeclared
             collections.
         :type allow_implicit: bool
         :param intermediate_commit_count: Max number of operations after which
-            an intermediate commit is performed automatically. Applies only to
-            RocksDB storage engine.
+            an intermediate commit is performed automatically.
         :type intermediate_commit_count: int
         :param intermediate_commit_size: Max size of operations in bytes after
-            which an intermediate commit is performed automatically. Applies
-            only to RocksDB storage engine.
+            which an intermediate commit is performed automatically.
         :type intermediate_commit_size: int
         :return: Return value of **command**.
         :rtype: str | unicode
@@ -948,18 +945,14 @@ def response_handler(resp):
     def create_collection(self,
                           name,
                           sync=False,
-                          compact=True,
                           system=False,
-                          journal_size=None,
                           edge=False,
-                          volatile=False,
                           user_keys=True,
                           key_increment=None,
                           key_offset=None,
                           key_generator='traditional',
                           shard_fields=None,
                           shard_count=None,
-                          index_bucket_count=None,
                           replication_factor=None,
                           shard_like=None,
                           sync_replication=None,
@@ -974,21 +967,11 @@ def create_collection(self,
         :param sync: If set to True, document operations via the collection
             will block until synchronized to disk by default.
         :type sync: bool
-        :param compact: If set to True, the collection is compacted. Applies
-            only to MMFiles storage engine.
-        :type compact: bool
         :param system: If set to True, a system collection is created. The
             collection name must have leading underscore "_" character.
         :type system: bool
-        :param journal_size: Max size of the journal in bytes.
-        :type journal_size: int
         :param edge: If set to True, an edge collection is created.
         :type edge: bool
-        :param volatile: If set to True, collection data is kept in-memory only
-            and not made persistent. Unloading the collection will cause the
-            collection data to be discarded. Stopping or re-starting the server
-            will also cause full loss of data.
-        :type volatile: bool
         :param key_generator: Used for generating document keys. Allowed values
             are "traditional" or "autoincrement".
         :type key_generator: str | unicode
@@ -1006,14 +989,6 @@ def create_collection(self,
         :type shard_fields: [str | unicode]
         :param shard_count: Number of shards to create.
         :type shard_count: int
-        :param index_bucket_count: Number of buckets into which indexes using
-            hash tables are split. The default is 16, and this number has to be
-            a power of 2 and less than or equal to 1024. For large collections,
-            one should increase this to avoid long pauses when the hash table
-            has to be initially built or re-sized, since buckets are re-sized
-            individually and can be initially built in parallel. For instance,
-            64 may be a sensible value for 100 million documents.
-        :type index_bucket_count: int
         :param replication_factor: Number of copies of each shard on different
             servers in a cluster. Allowed values are 1 (only one copy is kept
             and no synchronous replication), and n (n-1 replicas are kept and
@@ -1070,20 +1045,14 @@
         data = {
             'name': name,
             'waitForSync': sync,
-            'doCompact': compact,
             'isSystem': system,
-            'isVolatile': volatile,
             'keyOptions': key_options,
             'type': 3 if edge else 2
         }
-        if journal_size is not None:
-            data['journalSize'] = journal_size
         if shard_count is not None:
             data['numberOfShards'] = shard_count
         if shard_fields is not None:
             data['shardKeys'] = shard_fields
-        if index_bucket_count is not None:
-            data['indexBuckets'] = index_bucket_count
         if replication_factor is not None:
             data['replicationFactor'] = replication_factor
         if shard_like is not None:
@@ -2518,8 +2487,7 @@ def begin_transaction(self,
             given, a default value is used. Setting it to 0 disables the
             timeout.
         :type lock_timeout: int
-        :param max_size: Max transaction size in bytes. Applicable to RocksDB
-            storage engine only.
+        :param max_size: Max transaction size in bytes.
         :type max_size:
         :return: Database API wrapper object specifically for transactions.
         :rtype: arango.database.TransactionDatabase
@@ -2640,8 +2608,7 @@ class TransactionDatabase(Database):
     :param lock_timeout: Timeout for waiting on collection locks. If not given,
         a default value is used. Setting it to 0 disables the timeout.
     :type lock_timeout: int
-    :param max_size: Max transaction size in bytes. Applicable to RocksDB
-        storage engine only.
+    :param max_size: Max transaction size in bytes.
     :type max_size: int
     """

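Note: a sketch of create_collection() after the removals. Names and values are illustrative, and the shard settings only take effect on a cluster.

    students = db.create_collection(
        name='students',
        sync=False,
        key_generator='traditional',
        shard_count=2,           # cluster only
        replication_factor=1,    # cluster only
    )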
4 changes: 0 additions & 4 deletions arango/exceptions.py
@@ -245,10 +245,6 @@ class CollectionUnloadError(ArangoServerError):
     """Failed to unload collection."""


-class CollectionRotateJournalError(ArangoServerError):
-    """Failed to rotate collection journal."""
-
-
 class CollectionRecalculateCountError(ArangoServerError):
     """Failed to recalculate document count."""

3 changes: 1 addition & 2 deletions arango/executor.py
@@ -302,8 +302,7 @@ class TransactionExecutor(Executor):
     :param lock_timeout: Timeout for waiting on collection locks. If not given,
         a default value is used. Setting it to 0 disables the timeout.
     :type lock_timeout: int
-    :param max_size: Max transaction size in bytes. Applicable to RocksDB
-        storage engine only.
+    :param max_size: Max transaction size in bytes.
     :type max_size: int
     """
     context = 'transaction'
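Note: a sketch of a stream transaction exercising max_size, which now applies without the RocksDB qualifier. The collection name and byte limit are illustrative.

    txn_db = db.begin_transaction(write='students', max_size=100000)
    txn_db.collection('students').insert({'_key': 'jane', 'gpa': 3.9})
    txn_db.commit_transaction()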
10 changes: 0 additions & 10 deletions arango/formatter.py
@@ -145,16 +145,6 @@ def format_collection(body):  # pragma: no cover
     if 'writeConcern' in body:
         result['write_concern'] = body['writeConcern']

-    # MMFiles only
-    if 'doCompact' in body:
-        result['compact'] = body['doCompact']
-    if 'journalSize' in body:
-        result['journal_size'] = body['journalSize']
-    if 'isVolatile' in body:
-        result['volatile'] = body['isVolatile']
-    if 'indexBuckets' in body:
-        result['index_bucket_count'] = body['indexBuckets']
-
     # Cluster only
     if 'shards' in body:
         result['shards'] = body['shards']
32 changes: 2 additions & 30 deletions arango/replication.py
@@ -40,7 +40,7 @@ def __init__(self, connection, executor):
     def inventory(self, batch_id, include_system=None, all_databases=None):
         """Return an overview of collections and indexes.

-        :param batch_id: Batch ID. For RocksDB engine only.
+        :param batch_id: Batch ID.
         :type batch_id: str | unicode
         :param include_system: Include system collections in the result.
             Default value is True.
@@ -149,12 +149,7 @@ def response_handler(resp):
     def dump(self,
              collection,
              batch_id=None,
-             lower=None,
-             upper=None,
              chunk_size=None,
-             include_system=None,
-             ticks=None,
-             flush=None,
              deserialize=False):
         """Return the events data of one collection.

@@ -163,21 +158,8 @@
         :param chunk_size: Size of the result in bytes. This value is honored
             approximately only.
         :type chunk_size: int
-        :param batch_id: Batch ID. For RocksDB engine only.
+        :param batch_id: Batch ID.
         :type batch_id: str | unicode
-        :param lower: Lower bound tick value for results. For MMFiles only.
-        :type lower: str | unicode
-        :param upper: Upper bound tick value for results. For MMFiles only.
-        :type upper: str | unicode
-        :param include_system: Include system collections in the result. For
-            MMFiles only. Default value is True.
-        :type include_system: bool
-        :param ticks: Whether to include tick values in the dump. For MMFiles
-            only. Default value is True.
-        :type ticks: bool
-        :param flush: Whether to flush the WAL before dumping. Default value is
-            True.
-        :type flush: bool
         :param deserialize: Deserialize the response content. Default is False.
         :type deserialize: bool
         :return: Collection events data.
@@ -190,16 +172,6 @@
             params['chunkSize'] = chunk_size
         if batch_id is not None:
             params['batchId'] = batch_id
-        if lower is not None:
-            params['from'] = lower
-        if upper is not None:
-            params['to'] = upper
-        if include_system is not None:
-            params['includeSystem'] = include_system
-        if ticks is not None:
-            params['ticks'] = ticks
-        if flush is not None:
-            params['flush '] = flush

         request = Request(
             method='get',
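Note: a sketch of the slimmed-down dump() call. It assumes the batch endpoints defined elsewhere in this module (create_dump_batch / delete_dump_batch) and that the batch result carries an 'id' key; the collection name is hypothetical.

    batch = db.replication.create_dump_batch(ttl=60)
    events = db.replication.dump(
        collection='students',
        batch_id=batch['id'],
        deserialize=True,
    )
    db.replication.delete_dump_batch(batch['id'])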
3 changes: 1 addition & 2 deletions arango/wal.py
@@ -246,8 +246,7 @@ def tail(self,
         :param client_info: Short description of the client, used for
             informative purposes only.
         :type client_info: str | unicode
-        :param barrier_id: ID of barrier used to keep WAL entries around. Only
-            required for the MMFiles storage engine.
+        :param barrier_id: ID of barrier used to keep WAL entries around.
         :type barrier_id: int
         :param deserialize: Deserialize the response content. Default is False.
         :type deserialize: bool
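Note: a minimal sketch of tailing the WAL now that barrier_id no longer carries an MMFiles caveat. Parameters are illustrative.

    events = db.wal.tail(deserialize=True)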
2 changes: 1 addition & 1 deletion docs/collection.rst
@@ -46,7 +46,7 @@ Here is an example showing how you can manage standard collections:
     students.load()
     students.unload()
     students.truncate()
-    students.configure(journal_size=3000000)
+    students.configure()

     # Delete the collection.
     db.delete_collection('students')