Commit e6f1ef2

Merge pull request #140 from 153957/remove-mmfiles
Remove MMFiles references and exclusive options
2 parents 25dcc28 + 2799c9c commit e6f1ef2

12 files changed (+20, -159 lines)

arango/aql.py

Lines changed: 7 additions & 11 deletions

@@ -183,18 +183,15 @@ def execute(self,
         :param profile: Return additional profiling details in the cursor,
             unless the query cache is used.
         :type profile: bool
-        :param max_transaction_size: Transaction size limit in bytes. Applies
-            only to RocksDB storage engine.
+        :param max_transaction_size: Transaction size limit in bytes.
         :type max_transaction_size: int
         :param max_warning_count: Max number of warnings returned.
         :type max_warning_count: int
         :param intermediate_commit_count: Max number of operations after
-            which an intermediate commit is performed automatically. Applies
-            only to RocksDB storage engine.
+            which an intermediate commit is performed automatically.
         :type intermediate_commit_count: int
         :param intermediate_commit_size: Max size of operations in bytes after
-            which an intermediate commit is performed automatically. Applies
-            only to RocksDB storage engine.
+            which an intermediate commit is performed automatically.
         :type intermediate_commit_size: int
         :param satellite_sync_wait: Number of seconds in which the server must
             synchronize the satellite collections involved in the query. When
@@ -214,11 +211,10 @@ def execute(self,
             entirety. Results are either returned right away (if the result set
             is small enough), or stored server-side and accessible via cursors
             (while respecting the ttl). You should use this parameter only for
-            short-running queries or without exclusive locks (write-locks on
-            MMFiles). Note: parameters **cache**, **count** and **full_count**
-            do not work for streaming queries. Query statistics, warnings and
-            profiling data are made available only after the query is finished.
-            Default value is False.
+            short-running queries or without exclusive locks. Note: parameters
+            **cache**, **count** and **full_count** do not work for streaming
+            queries. Query statistics, warnings and profiling data are made
+            available only after the query is finished. Default value is False.
         :type stream: bool
         :param skip_inaccessible_cols: If set to True, collections without user
             access are skipped, and query executes normally instead of raising
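
Since these limits now apply regardless of storage engine, a call written against the updated docstring looks roughly like the sketch below; the database handle `db`, the query text and the numeric values are illustrative assumptions, not part of the diff:

    # All limits below apply unconditionally now that RocksDB is the only
    # storage engine; no MMFiles caveats remain.
    cursor = db.aql.execute(
        'FOR s IN students RETURN s',
        stream=True,                        # suitable for short-running queries without exclusive locks
        max_transaction_size=100000000,     # bytes
        intermediate_commit_count=10000,    # operations per automatic intermediate commit
        intermediate_commit_size=50000000,  # bytes per automatic intermediate commit
    )
    for doc in cursor:
        print(doc)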

arango/collection.py

Lines changed: 1 addition & 37 deletions

@@ -15,7 +15,6 @@
     CollectionResponsibleShardError,
     CollectionRenameError,
     CollectionRevisionError,
-    CollectionRotateJournalError,
     CollectionStatisticsError,
     CollectionTruncateError,
     CollectionUnloadError,
@@ -307,22 +306,18 @@ def response_handler(resp):

         return self._execute(request, response_handler)

-    def configure(self, sync=None, journal_size=None):
+    def configure(self, sync=None):
         """Configure collection properties.

         :param sync: Block until operations are synchronized to disk.
         :type sync: bool
-        :param journal_size: Journal size in bytes.
-        :type journal_size: int
         :return: New collection properties.
         :rtype: dict
         :raise arango.exceptions.CollectionConfigureError: If operation fails.
         """
         data = {}
         if sync is not None:
             data['waitForSync'] = sync
-        if journal_size is not None:
-            data['journalSize'] = journal_size

         request = Request(
             method='put',
@@ -355,18 +350,6 @@ def response_handler(resp):
                 raise CollectionStatisticsError(resp, request)

             stats = resp.body.get('figures', resp.body)
-            for f in ['compactors', 'datafiles', 'journals']:
-                if f in stats and 'fileSize' in stats[f]:  # pragma: no cover
-                    stats[f]['file_size'] = stats[f].pop('fileSize')
-            if 'compactionStatus' in stats:  # pragma: no cover
-                status = stats.pop('compactionStatus')
-                if 'bytesRead' in status:
-                    status['bytes_read'] = status.pop('bytesRead')
-                if 'bytesWritten' in status:
-                    status['bytes_written'] = status.pop('bytesWritten')
-                if 'filesCombined' in status:
-                    status['files_combined'] = status.pop('filesCombined')
-                stats['compaction_status'] = status
             if 'documentReferences' in stats:  # pragma: no cover
                 stats['document_refs'] = stats.pop('documentReferences')
             if 'lastTick' in stats:  # pragma: no cover
@@ -470,25 +453,6 @@ def response_handler(resp):

         return self._execute(request, response_handler)

-    def rotate(self):
-        """Rotate the collection journal.
-
-        :return: True if collection journal was rotated successfully.
-        :rtype: bool
-        :raise arango.exceptions.CollectionRotateJournalError: If rotate fails.
-        """
-        request = Request(
-            method='put',
-            endpoint='/_api/collection/{}/rotate'.format(self.name),
-        )
-
-        def response_handler(resp):
-            if not resp.is_success:
-                raise CollectionRotateJournalError(resp, request)
-            return True  # pragma: no cover
-
-        return self._execute(request, response_handler)
-
     def truncate(self):
         """Delete all documents in the collection.

arango/database.py

Lines changed: 5 additions & 38 deletions

@@ -238,20 +238,17 @@ def execute_transaction(self,
             ArangoDB server waits indefinitely. If not set, system default
             value is used.
         :type timeout: int
-        :param max_size: Max transaction size limit in bytes. Applies only
-            to RocksDB storage engine.
+        :param max_size: Max transaction size limit in bytes.
         :type max_size: int
         :param allow_implicit: If set to True, undeclared read collections are
             loaded lazily. If set to False, transaction fails on any undeclared
             collections.
         :type allow_implicit: bool
         :param intermediate_commit_count: Max number of operations after which
-            an intermediate commit is performed automatically. Applies only to
-            RocksDB storage engine.
+            an intermediate commit is performed automatically.
         :type intermediate_commit_count: int
         :param intermediate_commit_size: Max size of operations in bytes after
-            which an intermediate commit is performed automatically. Applies
-            only to RocksDB storage engine.
+            which an intermediate commit is performed automatically.
         :type intermediate_commit_size: int
         :return: Return value of **command**.
         :rtype: str | unicode
@@ -948,18 +945,14 @@ def response_handler(resp):
     def create_collection(self,
                           name,
                           sync=False,
-                          compact=True,
                           system=False,
-                          journal_size=None,
                           edge=False,
-                          volatile=False,
                           user_keys=True,
                           key_increment=None,
                           key_offset=None,
                           key_generator='traditional',
                           shard_fields=None,
                           shard_count=None,
-                          index_bucket_count=None,
                           replication_factor=None,
                           shard_like=None,
                           sync_replication=None,
@@ -974,21 +967,11 @@ def create_collection(self,
         :param sync: If set to True, document operations via the collection
             will block until synchronized to disk by default.
         :type sync: bool
-        :param compact: If set to True, the collection is compacted. Applies
-            only to MMFiles storage engine.
-        :type compact: bool
         :param system: If set to True, a system collection is created. The
            collection name must have leading underscore "_" character.
         :type system: bool
-        :param journal_size: Max size of the journal in bytes.
-        :type journal_size: int
         :param edge: If set to True, an edge collection is created.
         :type edge: bool
-        :param volatile: If set to True, collection data is kept in-memory only
-            and not made persistent. Unloading the collection will cause the
-            collection data to be discarded. Stopping or re-starting the server
-            will also cause full loss of data.
-        :type volatile: bool
         :param key_generator: Used for generating document keys. Allowed values
             are "traditional" or "autoincrement".
         :type key_generator: str | unicode
@@ -1006,14 +989,6 @@ def create_collection(self,
         :type shard_fields: [str | unicode]
         :param shard_count: Number of shards to create.
         :type shard_count: int
-        :param index_bucket_count: Number of buckets into which indexes using
-            hash tables are split. The default is 16, and this number has to be
-            a power of 2 and less than or equal to 1024. For large collections,
-            one should increase this to avoid long pauses when the hash table
-            has to be initially built or re-sized, since buckets are re-sized
-            individually and can be initially built in parallel. For instance,
-            64 may be a sensible value for 100 million documents.
-        :type index_bucket_count: int
         :param replication_factor: Number of copies of each shard on different
             servers in a cluster. Allowed values are 1 (only one copy is kept
            and no synchronous replication), and n (n-1 replicas are kept and
@@ -1070,20 +1045,14 @@ def create_collection(self,
         data = {
             'name': name,
             'waitForSync': sync,
-            'doCompact': compact,
             'isSystem': system,
-            'isVolatile': volatile,
             'keyOptions': key_options,
             'type': 3 if edge else 2
         }
-        if journal_size is not None:
-            data['journalSize'] = journal_size
         if shard_count is not None:
            data['numberOfShards'] = shard_count
         if shard_fields is not None:
             data['shardKeys'] = shard_fields
-        if index_bucket_count is not None:
-            data['indexBuckets'] = index_bucket_count
         if replication_factor is not None:
             data['replicationFactor'] = replication_factor
         if shard_like is not None:
@@ -2518,8 +2487,7 @@ def begin_transaction(self,
             given, a default value is used. Setting it to 0 disables the
             timeout.
         :type lock_timeout: int
-        :param max_size: Max transaction size in bytes. Applicable to RocksDB
-            storage engine only.
+        :param max_size: Max transaction size in bytes.
         :type max_size:
         :return: Database API wrapper object specifically for transactions.
         :rtype: arango.database.TransactionDatabase
@@ -2640,8 +2608,7 @@ class TransactionDatabase(Database):
     :param lock_timeout: Timeout for waiting on collection locks. If not given,
         a default value is used. Setting it to 0 disables the timeout.
     :type lock_timeout: int
-    :param max_size: Max transaction size in bytes. Applicable to RocksDB
-        storage engine only.
+    :param max_size: Max transaction size in bytes.
    :type max_size: int
    """
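
The removed keyword arguments disappear from the public signature as well, so a caller written against the new API looks roughly like the sketch below; the collection name, shard settings and the JavaScript command are illustrative assumptions, not values from the commit:

    # compact, journal_size, volatile and index_bucket_count can no longer be
    # passed; the remaining options are storage-engine agnostic.
    students = db.create_collection(
        'students',
        sync=False,
        edge=False,
        shard_count=3,
        replication_factor=2,
    )

    # max_size and the intermediate-commit limits now apply unconditionally,
    # since RocksDB is the only storage engine left.
    db.execute_transaction(
        command='function () { return 1; }',
        write=['students'],
        max_size=100000000,
        intermediate_commit_count=1000,
    )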

arango/exceptions.py

Lines changed: 0 additions & 4 deletions

@@ -245,10 +245,6 @@ class CollectionUnloadError(ArangoServerError):
     """Failed to unload collection."""


-class CollectionRotateJournalError(ArangoServerError):
-    """Failed to rotate collection journal."""
-
-
 class CollectionRecalculateCountError(ArangoServerError):
     """Failed to recalculate document count."""

arango/executor.py

Lines changed: 1 addition & 2 deletions

@@ -302,8 +302,7 @@ class TransactionExecutor(Executor):
     :param lock_timeout: Timeout for waiting on collection locks. If not given,
         a default value is used. Setting it to 0 disables the timeout.
     :type lock_timeout: int
-    :param max_size: Max transaction size in bytes. Applicable to RocksDB
-        storage engine only.
+    :param max_size: Max transaction size in bytes.
     :type max_size: int
     """
     context = 'transaction'

arango/formatter.py

Lines changed: 0 additions & 10 deletions

@@ -145,16 +145,6 @@ def format_collection(body):  # pragma: no cover
     if 'writeConcern' in body:
         result['write_concern'] = body['writeConcern']

-    # MMFiles only
-    if 'doCompact' in body:
-        result['compact'] = body['doCompact']
-    if 'journalSize' in body:
-        result['journal_size'] = body['journalSize']
-    if 'isVolatile' in body:
-        result['volatile'] = body['isVolatile']
-    if 'indexBuckets' in body:
-        result['index_bucket_count'] = body['indexBuckets']
-
     # Cluster only
     if 'shards' in body:
         result['shards'] = body['shards']

arango/replication.py

Lines changed: 2 additions & 30 deletions

@@ -40,7 +40,7 @@ def __init__(self, connection, executor):
     def inventory(self, batch_id, include_system=None, all_databases=None):
         """Return an overview of collections and indexes.

-        :param batch_id: Batch ID. For RocksDB engine only.
+        :param batch_id: Batch ID.
         :type batch_id: str | unicode
         :param include_system: Include system collections in the result.
             Default value is True.
@@ -149,12 +149,7 @@ def response_handler(resp):
     def dump(self,
              collection,
              batch_id=None,
-             lower=None,
-             upper=None,
             chunk_size=None,
-             include_system=None,
-             ticks=None,
-             flush=None,
             deserialize=False):
         """Return the events data of one collection.

@@ -163,21 +158,8 @@ def dump(self,
         :param chunk_size: Size of the result in bytes. This value is honored
             approximately only.
         :type chunk_size: int
-        :param batch_id: Batch ID. For RocksDB engine only.
+        :param batch_id: Batch ID.
         :type batch_id: str | unicode
-        :param lower: Lower bound tick value for results. For MMFiles only.
-        :type lower: str | unicode
-        :param upper: Upper bound tick value for results. For MMFiles only.
-        :type upper: str | unicode
-        :param include_system: Include system collections in the result. For
-            MMFiles only. Default value is True.
-        :type include_system: bool
-        :param ticks: Whether to include tick values in the dump. For MMFiles
-            only. Default value is True.
-        :type ticks: bool
-        :param flush: Whether to flush the WAL before dumping. Default value is
-            True.
-        :type flush: bool
         :param deserialize: Deserialize the response content. Default is False.
         :type deserialize: bool
         :return: Collection events data.
@@ -190,16 +172,6 @@ def dump(self,
             params['chunkSize'] = chunk_size
         if batch_id is not None:
             params['batchId'] = batch_id
-        if lower is not None:
-            params['from'] = lower
-        if upper is not None:
-            params['to'] = upper
-        if include_system is not None:
-            params['includeSystem'] = include_system
-        if ticks is not None:
-            params['ticks'] = ticks
-        if flush is not None:
-            params['flush '] = flush

         request = Request(
             method='get',
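
With the MMFiles-only bounds and flags gone, dump() reduces to the batch, chunking and deserialization options. A rough sketch, assuming a dump batch opened through the module's batch helper; the helper call, TTL and collection name are assumptions for illustration, not part of the diff:

    # Assumes a dump batch has already been opened via create_dump_batch().
    batch = db.replication.create_dump_batch(ttl=60)

    # lower, upper, include_system, ticks and flush are no longer accepted.
    events = db.replication.dump(
        collection='students',
        batch_id=batch['id'],
        chunk_size=1048576,
        deserialize=True,
    )

    db.replication.delete_dump_batch(batch['id'])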

arango/wal.py

Lines changed: 1 addition & 2 deletions

@@ -246,8 +246,7 @@ def tail(self,
         :param client_info: Short description of the client, used for
             informative purposes only.
         :type client_info: str | unicode
-        :param barrier_id: ID of barrier used to keep WAL entries around. Only
-            required for the MMFiles storage engine.
+        :param barrier_id: ID of barrier used to keep WAL entries around.
         :type barrier_id: int
         :param deserialize: Deserialize the response content. Default is False.
         :type deserialize: bool
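
barrier_id stays in the signature but is now purely optional, since no remaining storage engine requires a barrier. A minimal sketch; the client label is an assumption:

    # Tail the write-ahead log without a barrier; the raw payload is returned
    # unless deserialize=True is requested.
    entries = db.wal.tail(client_info='example-tailer')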

docs/collection.rst

Lines changed: 1 addition & 1 deletion

@@ -46,7 +46,7 @@ Here is an example showing how you can manage standard collections:
     students.load()
     students.unload()
     students.truncate()
-    students.configure(journal_size=3000000)
+    students.configure()

     # Delete the collection.
     db.delete_collection('students')
