diff --git a/.gitignore b/.gitignore
index cb2d49cf..ef3e73cb 100644
--- a/.gitignore
+++ b/.gitignore
@@ -8,3 +8,5 @@
*.coverage
.cache/
tests/__pycache__/
+*.DS_Store
+venv
\ No newline at end of file
diff --git a/.travis.yml b/.travis.yml
index b871cd16..8c4986c0 100644
--- a/.travis.yml
+++ b/.travis.yml
@@ -1,20 +1,21 @@
-language: python
sudo: false
+language: python
python:
- 2.7
- 3.4
- 3.5
- 3.6
+services:
+ - docker
before_install:
- - sh scripts/setup_arangodb.sh
+ - docker run --name arango -d -p 8529:8529 -e ARANGO_ROOT_PASSWORD=passwd arangodb/arangodb:3.3.8
+ - docker cp tests/static/service.zip arango:/tmp/service.zip
install:
- - pip install coverage
- - pip install pytest
- - pip install pytest-cov
- - pip install python-coveralls
- - python setup.py install
+ - pip install flake8 mock pytest pytest-cov python-coveralls sphinx
+ - pip install .
script:
- - py.test --cov-report= --cov=arango tests/
+ - python -m flake8
+ - python -m sphinx -b doctest docs build
+ - py.test --complete -s -v --cov=arango
after_success:
- coveralls
- - pkill -9 -f arango
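
Note: the CI change above replaces the `scripts/setup_arangodb.sh` bootstrap with a throwaway Docker container. A minimal sketch for reproducing the same setup locally before running the suite, assuming the `root`/`passwd` credentials from the `docker run` line and the third-party `requests` package:

    # Poll the Dockerized ArangoDB from the CI config until it accepts
    # connections (illustrative helper, not part of the repository).
    import time

    import requests

    def wait_for_arango(url='http://127.0.0.1:8529', timeout=30):
        deadline = time.time() + timeout
        while time.time() < deadline:
            try:
                resp = requests.get(url + '/_api/version',
                                    auth=('root', 'passwd'))
                if resp.status_code == 200:
                    return
            except requests.ConnectionError:
                pass
            time.sleep(1)
        raise RuntimeError('ArangoDB did not come up within {}s'.format(timeout))
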
diff --git a/MANIFEST.in b/MANIFEST.in
new file mode 100644
index 00000000..05ac65be
--- /dev/null
+++ b/MANIFEST.in
@@ -0,0 +1 @@
+include README.rst LICENSE
\ No newline at end of file
diff --git a/README.rst b/README.rst
index 1d1edd36..dbdc9493 100644
--- a/README.rst
+++ b/README.rst
@@ -32,22 +32,28 @@
|
-Welcome to the GitHub page for **python-arango**, a Python driver for
-`ArangoDB <https://www.arangodb.com>`__.
+Welcome to the GitHub page for **python-arango**, a Python driver for ArangoDB_.
+
+Announcements
+=============
+
+- Python-arango version `4.0.0`_ is now out!
+- Please see the releases_ page for details on latest updates.
Features
========
-- Clean, Pythonic interface
+- Clean Pythonic interface
- Lightweight
- High ArangoDB REST API coverage
Compatibility
=============
-- Python versions 2.7.x, 3.4.x, 3.5.x and 3.6.x are supported
-- Latest version of python-arango (3.x) supports ArangoDB 3.x only
-- Older versions of python-arango support ArangoDB 1.x ~ 2.x only
+- Python versions 2.7, 3.4, 3.5 and 3.6 are supported
+- Python-arango 4.x supports ArangoDB 3.3+ (recommended)
+- Python-arango 3.x supports ArangoDB 3.0 ~ 3.2 only
+- Python-arango 2.x supports ArangoDB 1.x ~ 2.x only
Installation
============
@@ -65,10 +71,7 @@ To install the latest version directly from GitHub_:
~$ pip install -e git+git@github.com:joowani/python-arango.git@master#egg=python-arango
-You may need to use ``sudo`` depending on your environment setup.
-
-.. _PyPi: https://pypi.python.org/pypi/python-arango
-.. _GitHub: https://github.com/joowani/python-arango
+You may need to use ``sudo`` depending on your environment.
Getting Started
===============
@@ -79,79 +82,98 @@ Here is a simple usage example:
from arango import ArangoClient
- # Initialize the client for ArangoDB
- client = ArangoClient(
- protocol='http',
- host='localhost',
- port=8529,
- username='root',
- password='',
- enable_logging=True
- )
+ # Initialize the client for ArangoDB.
+ client = ArangoClient(protocol='http', host='localhost', port=8529)
+
+ # Connect to "_system" database as root user.
+ sys_db = client.db('_system', username='root', password='passwd')
- # Create a new database named "my_database"
- db = client.create_database('my_database')
+ # Create a new database named "test".
+ sys_db.create_database('test')
- # Create a new collection named "students"
+ # Connect to "test" database as root user.
+ db = client.db('test', username='root', password='passwd')
+
+ # Create a new collection named "students".
students = db.create_collection('students')
- # Add a hash index to the collection
+ # Add a hash index to the collection.
students.add_hash_index(fields=['name'], unique=True)
- # Insert new documents into the collection
- students.insert({'name': 'jane', 'age': 19})
+ # Insert new documents into the collection.
+ students.insert({'name': 'jane', 'age': 39})
students.insert({'name': 'josh', 'age': 18})
- students.insert({'name': 'jake', 'age': 21})
+ students.insert({'name': 'judy', 'age': 21})
- # Execute an AQL query
- result = db.aql.execute('FOR s IN students RETURN s')
- print([student['name'] for student in result])
+ # Execute an AQL query and iterate through the result cursor.
+ cursor = db.aql.execute('FOR doc IN students RETURN doc')
+ student_names = [document['name'] for document in cursor]
-Here is another example involving graphs:
+Here is another example with graphs:
.. code-block:: python
from arango import ArangoClient
- client = ArangoClient()
+ # Initialize the client for ArangoDB.
+ client = ArangoClient(protocol='http', host='localhost', port=8529)
+
+ # Connect to "test" database as root user.
+ db = client.db('test', username='root', password='passwd')
- # Create a new graph
- graph = client.db('my_database').create_graph('my_graph')
+ # Create a new graph named "school".
+ graph = db.create_graph('school')
+
+ # Create vertex collections for the graph.
students = graph.create_vertex_collection('students')
- courses = graph.create_vertex_collection('courses')
- takes = graph.create_edge_definition(
- name='takes',
- from_collections=['students'],
- to_collections=['courses']
+ lectures = graph.create_vertex_collection('lectures')
+
+ # Create an edge definition (relation) for the graph.
+ register = graph.create_edge_definition(
+ edge_collection='register',
+ from_vertex_collections=['students'],
+ to_vertex_collections=['lectures']
)
- # Insert vertices
+ # Insert vertex documents into "students" (from) vertex collection.
students.insert({'_key': '01', 'full_name': 'Anna Smith'})
students.insert({'_key': '02', 'full_name': 'Jake Clark'})
students.insert({'_key': '03', 'full_name': 'Lisa Jones'})
- courses.insert({'_key': 'MAT101', 'title': 'Calculus'})
- courses.insert({'_key': 'STA101', 'title': 'Statistics'})
- courses.insert({'_key': 'CSC101', 'title': 'Algorithms'})
-
- # Insert edges
- takes.insert({'_from': 'students/01', '_to': 'courses/MAT101'})
- takes.insert({'_from': 'students/01', '_to': 'courses/STA101'})
- takes.insert({'_from': 'students/01', '_to': 'courses/CSC101'})
- takes.insert({'_from': 'students/02', '_to': 'courses/MAT101'})
- takes.insert({'_from': 'students/02', '_to': 'courses/STA101'})
- takes.insert({'_from': 'students/03', '_to': 'courses/CSC101'})
-
- # Traverse the graph in outbound direction, breadth-first
- traversal_results = graph.traverse(
+ # Insert vertex documents into "lectures" (to) vertex collection.
+ lectures.insert({'_key': 'MAT101', 'title': 'Calculus'})
+ lectures.insert({'_key': 'STA101', 'title': 'Statistics'})
+ lectures.insert({'_key': 'CSC101', 'title': 'Algorithms'})
+
+ # Insert edge documents into "register" edge collection.
+ register.insert({'_from': 'students/01', '_to': 'lectures/MAT101'})
+ register.insert({'_from': 'students/01', '_to': 'lectures/STA101'})
+ register.insert({'_from': 'students/01', '_to': 'lectures/CSC101'})
+ register.insert({'_from': 'students/02', '_to': 'lectures/MAT101'})
+ register.insert({'_from': 'students/02', '_to': 'lectures/STA101'})
+ register.insert({'_from': 'students/03', '_to': 'lectures/CSC101'})
+
+ # Traverse the graph in outbound direction, breadth-first.
+ result = graph.traverse(
start_vertex='students/01',
- strategy='bfs',
- direction='outbound'
+ direction='outbound',
+ strategy='breadthfirst'
)
- print(traversal_results['vertices'])
-Please read the full `API documentation`_ for more details!
+Check out the documentation_ for more details.
-.. _API documentation:
+Contributing
+============
+
+Please take a look at this page_ before submitting a pull request. Thanks!
+
+.. _ArangoDB: https://www.arangodb.com
+.. _4.0.0: https://github.com/joowani/python-arango/releases/tag/4.0.0
+.. _releases: https://github.com/joowani/python-arango/releases
+.. _PyPi: https://pypi.python.org/pypi/python-arango
+.. _GitHub: https://github.com/joowani/python-arango
+.. _documentation:
http://python-driver-for-arangodb.readthedocs.io/en/master/index.html
+.. _page:
+ http://python-driver-for-arangodb.readthedocs.io/en/master/contributing.html
\ No newline at end of file
diff --git a/arango/__init__.py b/arango/__init__.py
index 1739ceda..f54d74ef 100644
--- a/arango/__init__.py
+++ b/arango/__init__.py
@@ -1,2 +1,3 @@
-from arango.client import ArangoClient
-from arango.client import ArangoError
+from arango.client import ArangoClient # noqa: F401
+from arango.exceptions import * # noqa: F401 F403
+from arango.http import * # noqa: F401 F403
diff --git a/arango/api.py b/arango/api.py
index 2af09321..bf4a70c0 100644
--- a/arango/api.py
+++ b/arango/api.py
@@ -1,40 +1,58 @@
from __future__ import absolute_import, unicode_literals
-from functools import wraps
+__all__ = ['APIWrapper']
class APIWrapper(object):
- """ArangoDB API wrapper base class.
+ """Base class for API wrappers.
- This class is meant to be used internally only.
+ :param connection: HTTP connection.
+ :type connection: arango.connection.Connection
+ :param executor: API executor.
+ :type executor: arango.executor.Executor
"""
- def __getattribute__(self, attr):
- method = object.__getattribute__(self, attr)
- conn = object.__getattribute__(self, '_conn')
-
- if not getattr(method, 'api_method', False):
- return method
-
- @wraps(method)
- def wrapped_method(*args, **kwargs):
- request, handler = method(*args, **kwargs)
- return conn.handle_request(request, handler)
- return wrapped_method
-
-
-def api_method(method):
- """Decorator used to mark ArangoDB API methods.
-
- Methods decorated by this should return two things:
-
- - An instance of :class:`arango.request.Request`
- - A handler that takes an instance of :class:`arango.response.Response`
-
- :param method: the method to wrap
- :type method: callable
- :returns: the wrapped method
- :rtype: callable
- """
- setattr(method, 'api_method', True)
- return method
+ def __init__(self, connection, executor):
+ self._conn = connection
+ self._executor = executor
+ self._is_transaction = self.context == 'transaction'
+
+ @property
+ def db_name(self):
+ """Return the name of the current database.
+
+ :return: Database name.
+ :rtype: str | unicode
+ """
+ return self._conn.db_name
+
+ @property
+ def username(self):
+ """Return the username.
+
+ :return: Username.
+ :rtype: str | unicode
+ """
+ return self._conn.username
+
+ @property
+ def context(self):
+ """Return the API execution context.
+
+ :return: API execution context. Possible values are "default", "async",
+ "batch" and "transaction".
+ :rtype: str | unicode
+ """
+ return self._executor.context
+
+ def _execute(self, request, response_handler):
+ """Execute an API per execution context.
+
+ :param request: HTTP request.
+ :type request: arango.request.Request
+ :param response_handler: HTTP response handler.
+ :type response_handler: callable
+ :return: API execution result.
+ :rtype: str | unicode | bool | int | list | dict
+ """
+ return self._executor.execute(request, response_handler)
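
The rewritten `arango/api.py` above drops the `__getattribute__` magic and the `@api_method` decorator in favor of an explicit executor: every wrapper method now builds a `Request`, defines a `response_handler`, and hands both to `self._execute`, which routes them per context ("default", "async", "batch" or "transaction"). A minimal sketch of that pattern, where the `Ping` class, its `PingError`, and its use of `/_api/version` are illustrative only:

    from arango.api import APIWrapper
    from arango.request import Request

    class PingError(Exception):
        """Illustrative error type (not part of the library)."""

    class Ping(APIWrapper):

        def version(self):
            request = Request(
                method='get',
                endpoint='/_api/version'
            )

            def response_handler(resp):
                if not resp.is_success:
                    raise PingError('ping failed')
                return resp.body['version']

            # Executed immediately in the "default" context, or queued by
            # the async/batch/transaction executors.
            return self._execute(request, response_handler)
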
diff --git a/arango/aql.py b/arango/aql.py
index c4fc6f94..97266405 100644
--- a/arango/aql.py
+++ b/arango/aql.py
@@ -1,12 +1,20 @@
from __future__ import absolute_import, unicode_literals
-from arango.api import APIWrapper, api_method
-from arango.utils import HTTP_OK
+from json import dumps
+
+__all__ = ['AQL', 'AQLQueryCache']
+
+from arango.api import APIWrapper
from arango.cursor import Cursor
from arango.exceptions import (
AQLQueryExplainError,
AQLQueryValidateError,
AQLQueryExecuteError,
+ AQLQueryListError,
+ AQLQueryClearError,
+ AQLQueryTrackingGetError,
+ AQLQueryKillError,
+ AQLQueryTrackingSetError,
AQLFunctionCreateError,
AQLFunctionDeleteError,
AQLFunctionListError,
@@ -18,45 +26,84 @@
class AQL(APIWrapper):
- """Wrapper for invoking ArangoDB Query Language (AQL).
+ """AQL (ArangoDB Query Language) API wrapper.
- :param connection: ArangoDB database connection
+ :param connection: HTTP connection.
:type connection: arango.connection.Connection
+ :param executor: API executor.
+ :type executor: arango.executor.Executor
"""
- def __init__(self, connection):
- self._conn = connection
- self._cache = AQLQueryCache(self._conn)
+ def __init__(self, connection, executor):
+ super(AQL, self).__init__(connection, executor)
def __repr__(self):
- return ""
+ return ''.format(self._conn.db_name)
+
+ # noinspection PyMethodMayBeStatic
+ def _format_tracking(self, body):
+ """Format the tracking properties.
+
+ :param body: Response body.
+ :type body: dict
+ :return: Formatted body.
+ :rtype: dict
+ """
+ body.pop('code', None)
+ body.pop('error', None)
+ if 'maxQueryStringLength' in body:
+ body['max_query_string_length'] = body.pop('maxQueryStringLength')
+ if 'maxSlowQueries' in body:
+ body['max_slow_queries'] = body.pop('maxSlowQueries')
+ if 'slowQueryThreshold' in body:
+ body['slow_query_threshold'] = body.pop('slowQueryThreshold')
+ if 'trackBindVars' in body:
+ body['track_bind_vars'] = body.pop('trackBindVars')
+ if 'trackSlowQueries' in body:
+ body['track_slow_queries'] = body.pop('trackSlowQueries')
+ return body
+
+ # noinspection PyMethodMayBeStatic
+ def _format_queries(self, body):
+ """Format the list of queries.
+
+ :param body: Response body.
+ :type body: dict
+ :return: Formatted body.
+ :rtype: dict
+ """
+ for query in body:
+ if 'bindVars' in query:
+ query['bind_vars'] = query.pop('bindVars')
+ if 'runTime' in query:
+ query['runtime'] = query.pop('runTime')
+ return body
@property
def cache(self):
- """Return the query cache object.
+ """Return the query cache API wrapper.
- :returns: the query cache
- :rtype: arango.query.AQLQueryCache
+ :return: Query cache API wrapper.
+ :rtype: arango.aql.AQLQueryCache
"""
- return self._cache
+ return AQLQueryCache(self._conn, self._executor)
- @api_method
def explain(self, query, all_plans=False, max_plans=None, opt_rules=None):
- """Inspect the query and return its metadata.
+ """Inspect the query and return its metadata without executing it.
- :param query: the query to inspect
+ :param query: Query to inspect.
:type query: str | unicode
- :param all_plans: if ``True`` all possible execution plans are
- returned, otherwise only the optimal one is returned
+ :param all_plans: If set to True, all possible execution plans are
+ returned in the result. If set to False, only the optimal plan
+ is returned.
:type all_plans: bool
- :param max_plans: the total number of plans generated by the optimizer
+ :param max_plans: Total number of plans generated by the optimizer.
:type max_plans: int
- :param opt_rules: the list of optimizer rules
+ :param opt_rules: List of optimizer rules.
:type opt_rules: list
- :returns: the plan or plans if `all_plans` is set to ``True``
- :rtype: list | dict
- :raises arango.exceptions.AQLQueryExplainError: if the query cannot be
- explained
+ :return: Execution plan, or plans if **all_plans** was set to True.
+ :rtype: dict | list
+ :raise arango.exceptions.AQLQueryExplainError: If explain fails.
"""
options = {'allPlans': all_plans}
if max_plans is not None:
@@ -70,23 +117,24 @@ def explain(self, query, all_plans=False, max_plans=None, opt_rules=None):
data={'query': query, 'options': options}
)
- def handler(res):
- if res.status_code not in HTTP_OK:
- raise AQLQueryExplainError(res)
- return res.body['plan' if 'plan' in res.body else 'plans']
+ def response_handler(resp):
+ if not resp.is_success:
+ raise AQLQueryExplainError(resp, request)
+ if 'plan' in resp.body:
+ return resp.body['plan']
+ else:
+ return resp.body['plans']
- return request, handler
+ return self._execute(request, response_handler)
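
A usage sketch for the reworked `explain`, assuming the "test" database and "students" collection created in the README example earlier in this diff:

    from arango import ArangoClient

    client = ArangoClient(protocol='http', host='localhost', port=8529)
    db = client.db('test', username='root', password='passwd')

    # With all_plans=False (the default) a single plan dict is returned.
    plan = db.aql.explain('FOR doc IN students RETURN doc')
    print(plan['estimatedCost'])
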
- @api_method
def validate(self, query):
- """Validate the query.
+ """Parse and validate the query without executing it.
- :param query: the query to validate
+ :param query: Query to validate.
:type query: str | unicode
- :returns: whether the validation was successful
- :rtype: bool
- :raises arango.exceptions.AQLQueryValidateError: if the query cannot be
- validated
+ :return: Query details.
+ :rtype: dict
+ :raise arango.exceptions.AQLQueryValidateError: If validation fails.
"""
request = Request(
method='post',
@@ -94,51 +142,107 @@ def validate(self, query):
data={'query': query}
)
- def handler(res):
- if res.status_code not in HTTP_OK:
- raise AQLQueryValidateError(res)
- res.body.pop('code', None)
- res.body.pop('error', None)
- return res.body
-
- return request, handler
-
- @api_method
- def execute(self, query, count=False, batch_size=None, ttl=None,
- bind_vars=None, full_count=None, max_plans=None,
- optimizer_rules=None):
+ def response_handler(resp):
+ if not resp.is_success:
+ raise AQLQueryValidateError(resp, request)
+ body = resp.body
+ body.pop('code', None)
+ body.pop('error', None)
+ if 'bindVars' in body:
+ body['bind_vars'] = body.pop('bindVars')
+ return body
+
+ return self._execute(request, response_handler)
+
+ def execute(self,
+ query,
+ count=False,
+ batch_size=None,
+ ttl=None,
+ bind_vars=None,
+ full_count=None,
+ max_plans=None,
+ optimizer_rules=None,
+ cache=None,
+ memory_limit=0,
+ fail_on_warning=None,
+ profile=None,
+ max_transaction_size=None,
+ max_warning_count=None,
+ intermediate_commit_count=None,
+ intermediate_commit_size=None,
+ satellite_sync_wait=None,
+ read_collections=None,
+ write_collections=None):
"""Execute the query and return the result cursor.
- :param query: the AQL query to execute
+ :param query: Query to execute.
:type query: str | unicode
- :param count: whether the document count should be returned
+ :param count: If set to True, the total document count is included in
+ the result cursor.
:type count: bool
- :param batch_size: maximum number of documents in one round trip
+ :param batch_size: Number of documents fetched by the cursor in one
+ round trip.
:type batch_size: int
- :param ttl: time-to-live for the cursor (in seconds)
+ :param ttl: Server side time-to-live for the cursor in seconds.
:type ttl: int
- :param bind_vars: key-value pairs of bind parameters
+ :param bind_vars: Bind variables for the query.
:type bind_vars: dict
- :param full_count: include count before last LIMIT
- :param max_plans: maximum number of plans the optimizer generates
+ :param full_count: This parameter applies only to queries with LIMIT
+ clauses. If set to True, the number of matched documents before
+ the last LIMIT clause executed is included in the cursor. This is
+ similar to MySQL SQL_CALC_FOUND_ROWS hint. Using this disables a
+ few LIMIT optimizations and may lead to a longer query execution.
+ :type full_count: bool
+ :param max_plans: Max number of plans the optimizer generates.
:type max_plans: int
- :param optimizer_rules: list of optimizer rules
- :type optimizer_rules: list
- :returns: document cursor
+ :param optimizer_rules: List of optimizer rules.
+ :type optimizer_rules: [str | unicode]
+ :param cache: If set to True, the query cache is used. The operation
+ mode of the query cache must be set to "on" or "demand".
+ :type cache: bool
+ :param memory_limit: Max amount of memory the query is allowed to use
+ in bytes. If the query goes over the limit, it fails with error
+ "resource limit exceeded". Value 0 indicates no limit.
+ :type memory_limit: int
+ :param fail_on_warning: If set to True, the query throws an exception
+ instead of producing a warning. This parameter can be used during
+ development to catch issues early. If set to False, warnings are
+ returned with the query result. There is a server configuration
+ option "--query.fail-on-warning" for setting the default value for
+ this behaviour so it does not need to be set per-query.
+ :type fail_on_warning: bool
+ :param profile: Return additional profiling details in the cursor,
+ unless the query cache is used.
+ :type profile: bool
+ :param max_transaction_size: Transaction size limit in bytes. Applies
+ only to RocksDB storage engine.
+ :type max_transaction_size: int
+ :param max_warning_count: Max number of warnings returned.
+ :type max_warning_count: int
+ :param intermediate_commit_count: Max number of operations after
+ which an intermediate commit is performed automatically. Applies
+ only to RocksDB storage engine.
+ :type intermediate_commit_count: int
+ :param intermediate_commit_size: Max size of operations in bytes after
+ which an intermediate commit is performed automatically. Applies
+ only to RocksDB storage engine.
+ :type intermediate_commit_size: int
+ :param satellite_sync_wait: Number of seconds in which the server must
+ synchronize the satellite collections involved in the query. When
+ the threshold is reached, the query is stopped. This parameter is
+ for enterprise version of ArangoDB only.
+ :type satellite_sync_wait: int | float
+ :param read_collections: Names of collections read during query
+ execution. This parameter is required for transactions only.
+ :type read_collections: [str | unicode]
+ :param write_collections: Names of collections written to during query
+ execution. This parameter is required for transactions only.
+ :type write_collections: [str | unicode]
+ :return: Result cursor.
:rtype: arango.cursor.Cursor
- :raises arango.exceptions.AQLQueryExecuteError: if the query cannot be
- executed
- :raises arango.exceptions.CursorCloseError: if the cursor cannot be
- closed properly
+ :raise arango.exceptions.AQLQueryExecuteError: If execute fails.
"""
- options = {}
- if full_count is not None:
- options['fullCount'] = full_count
- if max_plans is not None:
- options['maxNumberOfPlans'] = max_plans
- if optimizer_rules is not None:
- options['optimizer'] = {'rules': optimizer_rules}
-
data = {'query': query, 'count': count}
if batch_size is not None:
data['batchSize'] = batch_size
@@ -146,53 +250,225 @@ def execute(self, query, count=False, batch_size=None, ttl=None,
data['ttl'] = ttl
if bind_vars is not None:
data['bindVars'] = bind_vars
+ if cache is not None:
+ data['cache'] = cache
+ if memory_limit is not None:
+ data['memoryLimit'] = memory_limit
+
+ options = {}
+ if full_count is not None:
+ options['fullCount'] = full_count
+ if max_plans is not None:
+ options['maxNumberOfPlans'] = max_plans
+ if optimizer_rules is not None:
+ options['optimizer'] = {'rules': optimizer_rules}
+ if fail_on_warning is not None:
+ options['failOnWarning'] = fail_on_warning
+ if profile is not None:
+ options['profile'] = profile
+ if max_transaction_size is not None:
+ options['maxTransactionSize'] = max_transaction_size
+ if max_warning_count is not None:
+ options['maxWarningCount'] = max_warning_count
+ if intermediate_commit_count is not None:
+ options['intermediateCommitCount'] = intermediate_commit_count
+ if intermediate_commit_size is not None:
+ options['intermediateCommitSize'] = intermediate_commit_size
+ if satellite_sync_wait is not None:
+ options['satelliteSyncWait'] = satellite_sync_wait
if options:
data['options'] = options
+ data.update(options)
+
+ command = 'db._query({}, {}, {}).toArray()'.format(
+ dumps(query),
+ dumps(bind_vars),
+ dumps(data),
+ ) if self._is_transaction else None
request = Request(
method='post',
endpoint='/_api/cursor',
+ data=data,
+ command=command,
+ read=read_collections,
+ write=write_collections
+ )
+
+ def response_handler(resp):
+ if not resp.is_success:
+ raise AQLQueryExecuteError(resp, request)
+ return Cursor(self._conn, resp.body)
+
+ return self._execute(request, response_handler)
+
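
A usage sketch for the extended `execute`, again assuming the "test" database from the README example; the bind variables and paging options come straight from the parameters documented above:

    from arango import ArangoClient

    client = ArangoClient(protocol='http', host='localhost', port=8529)
    db = client.db('test', username='root', password='passwd')

    cursor = db.aql.execute(
        'FOR doc IN students FILTER doc.age < @value RETURN doc.name',
        bind_vars={'value': 30},
        batch_size=1,  # One document per server round trip.
        count=True     # Include the total match count in the cursor.
    )
    print(cursor.count())
    print([name for name in cursor])
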
+ def kill(self, query_id):
+ """Kill a running query.
+
+ :param query_id: Query ID.
+ :type query_id: str | unicode
+ :return: True if kill request was sent successfully.
+ :rtype: bool
+ :raise arango.exceptions.AQLQueryKillError: If the send fails.
+ """
+ request = Request(
+ method='delete',
+ endpoint='/_api/query/{}'.format(query_id)
+ )
+
+ def response_handler(resp):
+ if not resp.is_success:
+ raise AQLQueryKillError(resp, request)
+ return True
+
+ return self._execute(request, response_handler)
+
+ def queries(self):
+ """Return the currently running AQL queries.
+
+ :return: Running AQL queries.
+ :rtype: [dict]
+ :raise arango.exceptions.AQLQueryListError: If retrieval fails.
+ """
+ request = Request(
+ method='get',
+ endpoint='/_api/query/current'
+ )
+
+ def response_handler(resp):
+ if not resp.is_success:
+ raise AQLQueryListError(resp, request)
+ return self._format_queries(resp.body)
+
+ return self._execute(request, response_handler)
+
+ def slow_queries(self):
+ """Return a list of all slow AQL queries.
+
+ :return: Slow AQL queries.
+ :rtype: [dict]
+ :raise arango.exceptions.AQLQueryListError: If retrieval fails.
+ """
+ request = Request(
+ method='get',
+ endpoint='/_api/query/slow'
+ )
+
+ def response_handler(resp):
+ if not resp.is_success:
+ raise AQLQueryListError(resp, request)
+ return self._format_queries(resp.body)
+
+ return self._execute(request, response_handler)
+
+ def clear_slow_queries(self):
+ """Clear slow AQL queries.
+
+ :return: True if slow queries were cleared successfully.
+ :rtype: bool
+ :raise arango.exceptions.AQLQueryClearError: If operation fails.
+ """
+ request = Request(
+ method='delete',
+ endpoint='/_api/query/slow'
+ )
+
+ def response_handler(resp):
+ if not resp.is_success:
+ raise AQLQueryClearError(resp, request)
+ return True
+
+ return self._execute(request, response_handler)
+
+ def tracking(self):
+ """Return AQL query tracking properties.
+
+ :return: AQL query tracking properties.
+ :rtype: dict
+ :raise arango.exceptions.AQLQueryTrackingGetError: If retrieval fails.
+ """
+ request = Request(
+ method='get',
+ endpoint='/_api/query/properties'
+ )
+
+ def response_handler(resp):
+ if not resp.is_success:
+ raise AQLQueryTrackingGetError(resp, request)
+ return self._format_tracking(resp.body)
+
+ return self._execute(request, response_handler)
+
+ def set_tracking(self,
+ enabled=None,
+ max_slow_queries=None,
+ slow_query_threshold=None,
+ max_query_string_length=None,
+ track_bind_vars=None,
+ track_slow_queries=None):
+ """Configure AQL query tracking properties
+
+ :return: Updated AQL query tracking properties.
+ :rtype: dict
+ :raise arango.exceptions.AQLQueryTrackingSetError: If operation fails.
+ """
+ data = {}
+ if enabled is not None:
+ data['enabled'] = enabled
+ if max_slow_queries is not None:
+ data['maxSlowQueries'] = max_slow_queries
+ if max_query_string_length is not None:
+ data['maxQueryStringLength'] = max_query_string_length
+ if slow_query_threshold is not None:
+ data['slowQueryThreshold'] = slow_query_threshold
+ if track_bind_vars is not None:
+ data['trackBindVars'] = track_bind_vars
+ if track_slow_queries is not None:
+ data['trackSlowQueries'] = track_slow_queries
+
+ request = Request(
+ method='put',
+ endpoint='/_api/query/properties',
data=data
)
- def handler(res):
- if res.status_code not in HTTP_OK:
- raise AQLQueryExecuteError(res)
- return Cursor(self._conn, res.body)
+ def response_handler(resp):
+ if not resp.is_success:
+ raise AQLQueryTrackingSetError(resp, request)
+ return self._format_tracking(resp.body)
- return request, handler
+ return self._execute(request, response_handler)
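
A usage sketch tying together the new tracking and monitoring endpoints, assuming the "test" database from the README example:

    from arango import ArangoClient

    client = ArangoClient(protocol='http', host='localhost', port=8529)
    db = client.db('test', username='root', password='passwd')

    db.aql.set_tracking(track_slow_queries=True, slow_query_threshold=5)
    print(db.aql.tracking())      # Snake-cased keys, e.g. "max_slow_queries".
    print(db.aql.queries())       # Currently running queries.
    print(db.aql.slow_queries())  # Queries over the slow threshold.
    db.aql.clear_slow_queries()   # Reset the slow query log.
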
- @api_method
def functions(self):
- """List the AQL functions defined in this database.
+ """List the AQL functions defined in the database.
- :returns: a mapping of AQL function names to its javascript code
+ :return: Mapping of AQL function names to their javascript code.
:rtype: dict
- :raises arango.exceptions.AQLFunctionListError: if the AQL functions
- cannot be retrieved
+ :raise arango.exceptions.AQLFunctionListError: If retrieval fails.
"""
- request = Request(method='get', endpoint='/_api/aqlfunction')
+ request = Request(
+ method='get',
+ endpoint='/_api/aqlfunction'
+ )
- def handler(res):
- if res.status_code not in HTTP_OK:
- raise AQLFunctionListError(res)
- body = res.body or {}
+ def response_handler(resp):
+ if not resp.is_success:
+ raise AQLFunctionListError(resp, request)
+ body = resp.body or {}
return {func['name']: func['code'] for func in map(dict, body)}
- return request, handler
+ return self._execute(request, response_handler)
- @api_method
def create_function(self, name, code):
"""Create a new AQL function.
- :param name: the name of the new AQL function to create
+ :param name: AQL function name.
:type name: str | unicode
- :param code: the definition of the function in Javascript
+ :param code: Function definition in Javascript.
:type code: str | unicode
- :returns: whether the AQL function was created successfully
+ :return: True if AQL function was created successfully.
:rtype: bool
- :raises arango.exceptions.AQLFunctionCreateError: if the AQL function
- cannot be created
+ :raise arango.exceptions.AQLFunctionCreateError: If create fails.
"""
request = Request(
method='post',
@@ -200,92 +476,85 @@ def create_function(self, name, code):
data={'name': name, 'code': code}
)
- def handler(res):
- if res.status_code not in (200, 201):
- raise AQLFunctionCreateError(res)
- return not res.body['error']
+ def response_handler(resp):
+ if not resp.is_success:
+ raise AQLFunctionCreateError(resp, request)
+ return True
- return request, handler
+ return self._execute(request, response_handler)
- @api_method
- def delete_function(self, name, group=None, ignore_missing=False):
- """Delete the AQL function of the given name.
+ def delete_function(self, name, group=False, ignore_missing=False):
+ """Delete an AQL function.
- If ``group`` is set to True, then the function name provided in
- ``name`` is treated as a namespace prefix, and all functions in
- the specified namespace will be deleted. If set to False, the
- function name provided in ``name`` must be fully qualified,
- including any namespaces.
-
- :param name: the name of the AQL function to delete
+ :param name: AQL function name.
:type name: str | unicode
- :param group: treat the name as a namespace prefix
+ :param group: If set to True, value of parameter **name** is treated
+ as a namespace prefix, and all functions in the namespace are
+ deleted. If set to False, the value of **name** must be a fully
+ qualified function name including any namespaces.
:type group: bool
- :param ignore_missing: ignore missing functions
+ :param ignore_missing: Do not raise an exception on missing function.
:type ignore_missing: bool
- :returns: whether the AQL function was deleted successfully
+ :return: True if AQL function was deleted successfully, False if
+ function was not found and **ignore_missing** was set to True.
:rtype: bool
- :raises arango.exceptions.AQLFunctionDeleteError: if the AQL function
- cannot be deleted
+ :raise arango.exceptions.AQLFunctionDeleteError: If delete fails.
"""
request = Request(
method='delete',
endpoint='/_api/aqlfunction/{}'.format(name),
- params={'group': group} if group is not None else {}
+ params={'group': group}
)
- def handler(res):
- if res.status_code not in HTTP_OK:
- if not (res.status_code == 404 and ignore_missing):
- raise AQLFunctionDeleteError(res)
- return not res.body['error']
+ def response_handler(resp):
+ if resp.error_code == 1582 and ignore_missing:
+ return False
+ if not resp.is_success:
+ raise AQLFunctionDeleteError(resp, request)
+ return True
- return request, handler
+ return self._execute(request, response_handler)
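
A usage sketch for the AQL function endpoints; the function name and Javascript body below are illustrative only:

    from arango import ArangoClient

    client = ArangoClient(protocol='http', host='localhost', port=8529)
    db = client.db('test', username='root', password='passwd')

    db.aql.create_function(
        name='myfunctions::temperature::celsius_to_fahrenheit',
        code='function (celsius) { return celsius * 1.8 + 32; }'
    )
    print(db.aql.functions())

    # With group=True the name is treated as a namespace prefix.
    db.aql.delete_function('myfunctions::temperature', group=True)
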
class AQLQueryCache(APIWrapper):
- """ArangoDB query cache.
+ """AQL Query Cache API wrapper."""
- :param connection: ArangoDB database connection
- :type connection: arango.connection.Connection
- """
-
- def __init__(self, connection):
- self._conn = connection
+ def __repr__(self):
+ return '<AQLQueryCache in {}>'.format(self._conn.db_name)
- @api_method
def properties(self):
- """Return the properties of the query cache.
+ """Return the query cache properties.
- :returns: the cache properties
+ :return: Query cache properties.
:rtype: dict
- :raises arango.exceptions.AQLCachePropertiesError: if the cache
- properties cannot be retrieved
+ :raise arango.exceptions.AQLCachePropertiesError: If retrieval fails.
"""
request = Request(
method='get',
endpoint='/_api/query-cache/properties'
)
- def handler(res):
- if res.status_code not in HTTP_OK:
- raise AQLCachePropertiesError(res)
- return {'mode': res.body['mode'], 'limit': res.body['maxResults']}
+ def response_handler(resp):
+ if not resp.is_success:
+ raise AQLCachePropertiesError(resp, request)
+ return {
+ 'mode': resp.body['mode'],
+ 'limit': resp.body['maxResults']
+ }
- return request, handler
+ return self._execute(request, response_handler)
- @api_method
def configure(self, mode=None, limit=None):
- """Configure the AQL query cache.
+ """Configure the query cache properties.
- :param mode: the operation mode (``"off"``, ``"on"`` or ``"demand"``)
+ :param mode: Operation mode. Allowed values are "off", "on" and
+ "demand".
:type mode: str | unicode
- :param limit: the maximum number of results to be stored
+ :param limit: Max number of query results to be stored.
:type limit: int
- :returns: the result of the operation
+ :return: Query cache properties.
:rtype: dict
- :raises arango.exceptions.AQLCacheConfigureError: if the
- cache properties cannot be updated
+ :raise arango.exceptions.AQLCacheConfigureError: If operation fails.
"""
data = {}
if mode is not None:
@@ -299,27 +568,31 @@ def configure(self, mode=None, limit=None):
data=data
)
- def handler(res):
- if res.status_code not in HTTP_OK:
- raise AQLCacheConfigureError(res)
- return {'mode': res.body['mode'], 'limit': res.body['maxResults']}
+ def response_handler(resp):
+ if not resp.is_success:
+ raise AQLCacheConfigureError(resp, request)
+ return {
+ 'mode': resp.body['mode'],
+ 'limit': resp.body['maxResults']
+ }
- return request, handler
+ return self._execute(request, response_handler)
- @api_method
def clear(self):
- """Clear any results in the query cache.
+ """Clear the query cache.
- :returns: the result of the operation
+ :return: True if query cache was cleared successfully.
:rtype: bool
- :raises arango.exceptions.AQLCacheClearError: if the cache query
- cannot be cleared
+ :raise arango.exceptions.AQLCacheClearError: If operation fails.
"""
- request = Request(method='delete', endpoint='/_api/query-cache')
+ request = Request(
+ method='delete',
+ endpoint='/_api/query-cache'
+ )
- def handler(res):
- if res.status_code not in HTTP_OK:
- raise AQLCacheClearError(res)
- return not res.body['error']
+ def response_handler(resp):
+ if not resp.is_success:
+ raise AQLCacheClearError(resp, request)
+ return True
- return request, handler
+ return self._execute(request, response_handler)
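
A usage sketch for the query cache wrapper, assuming the "test" database from the README example:

    from arango import ArangoClient

    client = ArangoClient(protocol='http', host='localhost', port=8529)
    db = client.db('test', username='root', password='passwd')

    cache = db.aql.cache
    cache.configure(mode='on', limit=128)  # Modes: "off", "on", "demand".
    print(cache.properties())              # e.g. {'mode': 'on', 'limit': 128}
    cache.clear()                          # Returns True on success.
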
diff --git a/arango/async.py b/arango/async.py
deleted file mode 100644
index 78377440..00000000
--- a/arango/async.py
+++ /dev/null
@@ -1,240 +0,0 @@
-from __future__ import absolute_import, unicode_literals
-
-from arango.collections import Collection
-from arango.connection import Connection
-from arango.utils import HTTP_OK
-from arango.exceptions import (
- AsyncExecuteError,
- AsyncJobCancelError,
- AsyncJobStatusError,
- AsyncJobResultError,
- AsyncJobClearError
-)
-from arango.graph import Graph
-from arango.aql import AQL
-
-
-class AsyncExecution(Connection):
- """ArangoDB asynchronous execution.
-
- API requests via this class are placed in a server-side in-memory task
- queue and executed asynchronously in a fire-and-forget style.
-
- :param connection: ArangoDB database connection
- :type connection: arango.connection.Connection
- :param return_result: if ``True``, an :class:`arango.async.AsyncJob`
- instance (which holds the result of the request) is returned each
- time an API request is queued, otherwise ``None`` is returned
- :type return_result: bool
-
- .. warning::
- Asynchronous execution is currently an experimental feature and is not
- thread-safe.
- """
-
- def __init__(self, connection, return_result=True):
- super(AsyncExecution, self).__init__(
- protocol=connection.protocol,
- host=connection.host,
- port=connection.port,
- username=connection.username,
- password=connection.password,
- http_client=connection.http_client,
- database=connection.database,
- enable_logging=connection.logging_enabled
- )
- self._return_result = return_result
- self._aql = AQL(self)
- self._type = 'async'
-
- def __repr__(self):
- return '<ArangoDB asynchronous execution>'
-
- def handle_request(self, request, handler):
- """Handle the incoming request and response handler.
-
- :param request: the API request to be placed in the server-side queue
- :type request: arango.request.Request
- :param handler: the response handler
- :type handler: callable
- :returns: the async job or None
- :rtype: arango.async.AsyncJob
- :raises arango.exceptions.AsyncExecuteError: if the async request
- cannot be executed
- """
- if self._return_result:
- request.headers['x-arango-async'] = 'store'
- else:
- request.headers['x-arango-async'] = 'true'
-
- res = getattr(self, request.method)(**request.kwargs)
- if res.status_code not in HTTP_OK:
- raise AsyncExecuteError(res)
- if self._return_result:
- return AsyncJob(self, res.headers['x-arango-async-id'], handler)
-
- @property
- def aql(self):
- """Return the AQL object tailored for asynchronous execution.
-
- API requests via the returned query object are placed in a server-side
- in-memory task queue and executed asynchronously in a fire-and-forget
- style.
-
- :returns: ArangoDB query object
- :rtype: arango.query.AQL
- """
- return self._aql
-
- def collection(self, name):
- """Return a collection object tailored for asynchronous execution.
-
- API requests via the returned collection object are placed in a
- server-side in-memory task queue and executed asynchronously in
- a fire-and-forget style.
-
- :param name: the name of the collection
- :type name: str | unicode
- :returns: the collection object
- :rtype: arango.collections.Collection
- """
- return Collection(self, name)
-
- def graph(self, name):
- """Return a graph object tailored for asynchronous execution.
-
- API requests via the returned graph object are placed in a server-side
- in-memory task queue and executed asynchronously in a fire-and-forget
- style.
-
- :param name: the name of the graph
- :type name: str | unicode
- :returns: the graph object
- :rtype: arango.graph.Graph
- """
- return Graph(self, name)
-
-
-class AsyncJob(object):
- """ArangoDB async job which holds the result of an API request.
-
- An async job tracks the status of a queued API request and its result.
-
- :param connection: ArangoDB database connection
- :type connection: arango.connection.Connection
- :param job_id: the ID of the async job
- :type job_id: str | unicode
- :param handler: the response handler
- :type handler: callable
- """
-
- def __init__(self, connection, job_id, handler):
- self._conn = connection
- self._id = job_id
- self._handler = handler
-
- def __repr__(self):
- return '<ArangoDB asynchronous job {}>'.format(self._id)
-
- @property
- def id(self):
- """Return the ID of the async job.
-
- :returns: the ID of the async job
- :rtype: str | unicode
- """
- return self._id
-
- def status(self):
- """Return the status of the async job from the server.
-
- :returns: the status of the async job, which can be ``"pending"`` (the
- job is still in the queue), ``"done"`` (the job finished or raised
- an exception)
- :rtype: str | unicode
- :raises arango.exceptions.AsyncJobStatusError: if the status of the
- async job cannot be retrieved from the server
- """
- res = self._conn.get('/_api/job/{}'.format(self.id))
- if res.status_code == 204:
- return 'pending'
- elif res.status_code in HTTP_OK:
- return 'done'
- elif res.status_code == 404:
- raise AsyncJobStatusError(res, 'Job {} missing'.format(self.id))
- else:
- raise AsyncJobStatusError(res)
-
- def result(self):
- """Return the result of the async job if available.
-
- :returns: the result or the exception from the async job
- :rtype: object
- :raises arango.exceptions.AsyncJobResultError: if the result of the
- async job cannot be retrieved from the server
-
- .. note::
- An async job result will automatically be cleared from the server
- once fetched and will *not* be available in subsequent calls.
- """
- res = self._conn.put('/_api/job/{}'.format(self._id))
- if ('X-Arango-Async-Id' in res.headers
- or 'x-arango-async-id' in res.headers):
- try:
- result = self._handler(res)
- except Exception as error:
- return error
- else:
- return result
- elif res.status_code == 204:
- raise AsyncJobResultError(res, 'Job {} not done'.format(self._id))
- elif res.status_code == 404:
- raise AsyncJobResultError(res, 'Job {} missing'.format(self._id))
- else:
- raise AsyncJobResultError(res)
-
- def cancel(self, ignore_missing=False): # pragma: no cover
- """Cancel the async job if it is still pending.
-
- :param ignore_missing: ignore missing async jobs
- :type ignore_missing: bool
- :returns: ``True`` if the job was cancelled successfully, ``False`` if
- the job was not found but **ignore_missing** was set to ``True``
- :rtype: bool
- :raises arango.exceptions.AsyncJobCancelError: if the async job cannot
- be cancelled
-
- .. note::
- An async job cannot be cancelled once it is taken out of the queue
- (i.e. started, finished or cancelled).
- """
- res = self._conn.put('/_api/job/{}/cancel'.format(self._id))
- if res.status_code == 200:
- return True
- elif res.status_code == 404:
- if ignore_missing:
- return False
- raise AsyncJobCancelError(res, 'Job {} missing'.format(self._id))
- else:
- raise AsyncJobCancelError(res)
-
- def clear(self, ignore_missing=False):
- """Delete the result of the job from the server.
-
- :param ignore_missing: ignore missing async jobs
- :type ignore_missing: bool
- :returns: ``True`` if the result was deleted successfully, ``False``
- if the job was not found but **ignore_missing** was set to ``True``
- :rtype: bool
- :raises arango.exceptions.AsyncJobClearError: if the result of the
- async job cannot be delete from the server
- """
- res = self._conn.delete('/_api/job/{}'.format(self._id))
- if res.status_code in HTTP_OK:
- return True
- elif res.status_code == 404:
- if ignore_missing:
- return False
- raise AsyncJobClearError(res, 'Job {} missing'.format(self._id))
- else:
- raise AsyncJobClearError(res)
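
The deleted `arango/async.py` is superseded by the executor design introduced in `arango/api.py` above. A hedged sketch of the 4.x equivalent, assuming the `begin_async_execution` entry point on the standard database object (not shown in this diff):

    import time

    from arango import ArangoClient

    client = ArangoClient(protocol='http', host='localhost', port=8529)
    db = client.db('test', username='root', password='passwd')

    async_db = db.begin_async_execution(return_result=True)
    job = async_db.aql.execute('FOR doc IN students RETURN doc')
    while job.status() != 'done':
        time.sleep(0.1)  # Poll the server-side job queue.
    print([doc['name'] for doc in job.result()])
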
diff --git a/arango/batch.py b/arango/batch.py
deleted file mode 100644
index 7441e64a..00000000
--- a/arango/batch.py
+++ /dev/null
@@ -1,268 +0,0 @@
-from __future__ import absolute_import, unicode_literals
-
-from uuid import uuid4
-
-from arango.collections import Collection
-from arango.connection import Connection
-from arango.utils import HTTP_OK
-from arango.exceptions import BatchExecuteError, ArangoError
-from arango.graph import Graph
-from arango.response import Response
-from arango.aql import AQL
-
-
-class BatchExecution(Connection):
- """ArangoDB batch request.
-
- API requests via this class are queued in memory and executed as a whole
- in a single HTTP call to ArangoDB server.
-
- :param connection: ArangoDB database connection
- :type connection: arango.connection.Connection
- :param return_result: if ``True``, a :class:`arango.batch.BatchJob`
- (which holds the result after the commit) is returned each time an API
- request is queued, otherwise ``None`` is returned
- :type return_result: bool
- :param commit_on_error: only applicable when *context managers* are used
- to execute the batch request: if ``True``, the requests queued
- so far are committed even if an exception is raised before existing
- out of the context (default: ``False``)
- :type commit_on_error: bool
-
- .. warning::
- Batch execution is currently an experimental feature and is not
- thread-safe.
- """
-
- def __init__(self, connection, return_result=True, commit_on_error=False):
- super(BatchExecution, self).__init__(
- protocol=connection.protocol,
- host=connection.host,
- port=connection.port,
- username=connection.username,
- password=connection.password,
- http_client=connection.http_client,
- database=connection.database,
- enable_logging=connection.logging_enabled
- )
- self._id = uuid4()
- self._return_result = return_result
- self._commit_on_error = commit_on_error
- self._requests = [] # The queue for requests
- self._handlers = [] # The queue for response handlers
- self._batch_jobs = [] # For tracking batch jobs
- self._aql = AQL(self)
- self._type = 'batch'
-
- def __repr__(self):
- return '<ArangoDB batch execution {}>'.format(self._id)
-
- def __enter__(self):
- return self
-
- def __exit__(self, exception, *_):
- if exception is None or self._commit_on_error:
- self.commit()
-
- @property
- def id(self):
- """Return the UUID of the batch request.
-
- :return: the UUID of the batch request
- :rtype: str | unicode
- """
- return self._id
-
- def handle_request(self, request, handler):
- """Handle the incoming request and response handler.
-
- :param request: the API request queued as part of the current batch
- request scope, and executed only when the batch is committed via
- method :func:`arango.batch.BatchExecution.commit`
- :type request: arango.request.Request
- :param handler: the response handler
- :type handler: callable
- :returns: the batch job or None
- :rtype: arango.batch.BatchJob
- """
- self._requests.append(request)
- self._handlers.append(handler)
-
- if not self._return_result:
- return None
- batch_job = BatchJob()
- self._batch_jobs.append(batch_job)
- return batch_job
-
- def commit(self):
- """Execute the queued API requests in a single HTTP call.
-
- If `return_response` was set to ``True`` during initialization, the
- responses are saved within an :class:`arango.batch.BatchJob` object
- for later retrieval via its :func:`arango.batch.BatchJob.result`
- method
-
- :raises arango.exceptions.BatchExecuteError: if the batch request
- cannot be executed
- """
- try:
- if not self._requests:
- return
- raw_data_list = []
- for content_id, request in enumerate(self._requests, start=1):
- raw_data_list.append('--XXXsubpartXXX\r\n')
- raw_data_list.append('Content-Type: application/x-arango-batchpart\r\n')
- raw_data_list.append('Content-Id: {}\r\n\r\n'.format(content_id))
- raw_data_list.append('{}\r\n'.format(request.stringify()))
- raw_data_list.append('--XXXsubpartXXX--\r\n\r\n')
- raw_data = ''.join(raw_data_list)
-
- res = self.post(
- endpoint='/_api/batch',
- headers={
- 'Content-Type': (
- 'multipart/form-data; boundary=XXXsubpartXXX'
- )
- },
- data=raw_data,
- )
- if res.status_code not in HTTP_OK:
- raise BatchExecuteError(res)
- if not self._return_result:
- return
-
- for index, raw_response in enumerate(
- res.raw_body.split('--XXXsubpartXXX')[1:-1]
- ):
- request = self._requests[index]
- handler = self._handlers[index]
- job = self._batch_jobs[index]
- res_parts = raw_response.strip().split('\r\n')
- raw_status, raw_body = res_parts[3], res_parts[-1]
- _, status_code, status_text = raw_status.split(' ', 2)
- try:
- result = handler(Response(
- method=request.method,
- url=self._url_prefix + request.endpoint,
- headers=request.headers,
- http_code=int(status_code),
- http_text=status_text,
- body=raw_body
- ))
- except ArangoError as err:
- job.update(status='error', result=err)
- else:
- job.update(status='done', result=result)
- finally:
- self._requests = []
- self._handlers = []
- self._batch_jobs = []
-
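
For reference, the hand-assembled payload built by `commit` above looks roughly like this on the wire (the request lines and bodies are illustrative):

    POST /_api/batch HTTP/1.1
    Content-Type: multipart/form-data; boundary=XXXsubpartXXX

    --XXXsubpartXXX
    Content-Type: application/x-arango-batchpart
    Content-Id: 1

    POST /_api/document/students HTTP/1.1

    {"name": "mia", "age": 27}
    --XXXsubpartXXX
    Content-Type: application/x-arango-batchpart
    Content-Id: 2

    GET /_api/collection/students/count HTTP/1.1

    --XXXsubpartXXX--
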
- def clear(self):
- """Clear the requests queue and discard pointers to batch jobs issued.
-
- :returns: the number of requests (and batch job pointers) discarded
- :rtype: int
-
- .. warning::
- This method will orphan any batch jobs that were issued
- """
- count = len(self._requests)
- self._requests = []
- self._handlers = []
- self._batch_jobs = []
- return count
-
- @property
- def aql(self):
- """Return the AQL object tailored for batch execution.
-
- API requests via the returned object are placed in an in-memory queue
- and committed as a whole in a single HTTP call to the ArangoDB server.
-
- :returns: ArangoDB query object
- :rtype: arango.query.AQL
- """
- return self._aql
-
- def collection(self, name):
- """Return the collection object tailored for batch execution.
-
- API requests via the returned object are placed in an in-memory queue
- and committed as a whole in a single HTTP call to the ArangoDB server.
-
- :param name: the name of the collection
- :type name: str | unicode
- :returns: the collection object
- :rtype: arango.collections.Collection
- """
- return Collection(self, name)
-
- def graph(self, name):
- """Return the graph object tailored for batch execution.
-
- API requests via the returned object are placed in an in-memory queue
- and committed as a whole in a single HTTP call to the ArangoDB server.
-
- :param name: the name of the graph
- :type name: str | unicode
- :returns: the graph object
- :rtype: arango.graph.Graph
- """
- return Graph(self, name)
-
-
-class BatchJob(object):
- """ArangoDB batch job which holds the result of an API request.
-
- A batch job tracks the status of a queued API request and its result.
- """
-
- def __init__(self):
- self._id = uuid4()
- self._status = 'pending'
- self._result = None
-
- def __repr__(self):
- return '<ArangoDB batch job {}>'.format(self._id)
-
- @property
- def id(self):
- """Return the UUID of the batch job.
-
- :return: the UUID of the batch job
- :rtype: str | unicode
- """
- return self._id
-
- def update(self, status, result=None):
- """Update the status and the result of the batch job.
-
- This method designed to be used internally only.
-
- :param status: the status of the job
- :type status: int
- :param result: the result of the job
- :type result: object
- """
- self._status = status
- self._result = result
-
- def status(self):
- """Return the status of the batch job.
-
- :returns: the batch job status, which can be ``"pending"`` (the job is
- still waiting to be committed), ``"done"`` (the job completed) or
- ``"error"`` (the job raised an exception)
- :rtype: str | unicode
- """
- return self._status
-
- def result(self):
- """Return the result of the job or raise its error.
-
- :returns: the result of the batch job if the job is successful
- :rtype: object
- :raises ArangoError: if the batch job failed
- """
- return self._result
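
Like `async.py`, the deleted `batch.py` maps onto the new executor design. A hedged sketch of the 4.x equivalent, assuming the `begin_batch_execution` entry point (not shown in this diff):

    from arango import ArangoClient

    client = ArangoClient(protocol='http', host='localhost', port=8529)
    db = client.db('test', username='root', password='passwd')

    batch_db = db.begin_batch_execution(return_result=True)
    job1 = batch_db.collection('students').insert({'name': 'mia', 'age': 27})
    job2 = batch_db.aql.execute('FOR doc IN students RETURN doc')
    batch_db.commit()  # One multipart call to /_api/batch.
    print(job1.result())
    print([doc['name'] for doc in job2.result()])
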
diff --git a/arango/client.py b/arango/client.py
index cb24700e..c1e17668 100644
--- a/arango/client.py
+++ b/arango/client.py
@@ -1,113 +1,54 @@
from __future__ import absolute_import, unicode_literals
-from datetime import datetime
+__all__ = ['ArangoClient']
-from requests import ConnectionError
-
-from arango.http_clients import DefaultHTTPClient
from arango.connection import Connection
-from arango.utils import HTTP_OK
-from arango.database import Database
-from arango.exceptions import *
-from arango.wal import WriteAheadLog
+from arango.database import StandardDatabase
+from arango.exceptions import ServerConnectionError
+from arango.version import __version__
class ArangoClient(object):
"""ArangoDB client.
- :param protocol: The internet transfer protocol (default: ``"http"``).
+ :param protocol: Internet transfer protocol (default: "http").
:type protocol: str | unicode
- :param host: ArangoDB server host (default: ``"localhost"``).
+ :param host: ArangoDB host (default: "127.0.0.1").
:type host: str | unicode
- :param port: ArangoDB server port (default: ``8529``).
- :type port: int or str
- :param username: ArangoDB default username (default: ``"root"``).
- :type username: str | unicode
- :param password: ArangoDB default password (default: ``""``).
- :param verify: Check the connection during initialization. Root privileges
- are required to use this flag.
- :type verify: bool
- :param http_client: Custom HTTP client to override the default one with.
- Please refer to the API documentation for more details.
- :type http_client: arango.http_clients.base.BaseHTTPClient
- :param enable_logging: Log all API requests as debug messages.
- :type enable_logging: bool
- :param check_cert: Verify SSL certificate when making HTTP requests. This
- flag is ignored if a custom **http_client** is specified.
- :type check_cert: bool
- :param use_session: Use session when making HTTP requests. This flag is
- ignored if a custom **http_client** is specified.
- :type use_session: bool
- :param logger: Custom logger to record the API requests with. The logger's
- ``debug`` method is called.
- :type logger: logging.Logger
+ :param port: ArangoDB port (default: 8529).
+ :type port: int
+ :param http_client: User-defined HTTP client.
+ :type http_client: arango.http.HTTPClient
"""
def __init__(self,
protocol='http',
host='127.0.0.1',
port=8529,
- username='root',
- password='',
- verify=False,
- http_client=None,
- enable_logging=True,
- check_cert=True,
- use_session=True,
- logger=None):
-
- self._protocol = protocol
- self._host = host
- self._port = port
- self._username = username
- self._password = password
- self._http_client = DefaultHTTPClient(
- use_session=use_session,
- check_cert=check_cert
- ) if http_client is None else http_client
- self._logging_enabled = enable_logging
- self._conn = Connection(
- protocol=self._protocol,
- host=self._host,
- port=self._port,
- database='_system',
- username=self._username,
- password=self._password,
- http_client=self._http_client,
- enable_logging=self._logging_enabled,
- logger=logger
- )
- self._wal = WriteAheadLog(self._conn)
-
- if verify:
- self.verify()
+ http_client=None):
+ self._protocol = protocol.strip('/')
+ self._host = host.strip('/')
+ self._port = int(port)
+ self._url = '{}://{}:{}'.format(protocol, host, port)
+ self._http_client = http_client
def __repr__(self):
- return '<ArangoDB client for "{}">'.format(self._host)
+ return '<ArangoClient {}>'.format(self._url)
- def verify(self):
- """Verify the connection to ArangoDB server.
-
- :returns: ``True`` if the connection is successful
- :rtype: bool
- :raises arango.exceptions.ServerConnectionError: if the connection to
- the ArangoDB server fails
+ @property
+ def version(self):
+ """Return the client version.
- .. note::
- Only the root user can access this method. For non-root users,
- use :func:`arango.database.Database.verify` (via a database
- the users have access to) instead.
+ :return: Client version.
+ :rtype: str | unicode
"""
- res = self._conn.head('/_api/version')
- if res.status_code not in HTTP_OK:
- raise ServerConnectionError(res)
- return True
+ return __version__
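
Note the breaking change here: in 3.x `version()` was a server call, while the property above returns the driver version. The server version now comes from a database object, as the removed docstrings further down suggest:

    from arango import ArangoClient

    client = ArangoClient(protocol='http', host='localhost', port=8529)
    print(client.version)  # Driver version, e.g. '4.0.0'.

    db = client.db('_system', username='root', password='passwd')
    print(db.version())    # ArangoDB server version.
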
@property
def protocol(self):
- """Return the internet transfer protocol.
+ """Return the internet transfer protocol (e.g. "http").
- :returns: the internet transfer protocol
+ :return: Internet transfer protocol.
:rtype: str | unicode
"""
return self._protocol
@@ -116,7 +57,7 @@ def protocol(self):
def host(self):
"""Return the ArangoDB host.
- :returns: the ArangoDB host
+ :return: ArangoDB host.
:rtype: str | unicode
"""
return self._host
@@ -125,891 +66,51 @@ def host(self):
def port(self):
"""Return the ArangoDB port.
- :returns: the ArangoDB port
+ :return: ArangoDB port.
:rtype: int
"""
return self._port
@property
- def username(self):
- """Return the ArangoDB username.
-
- :returns: the ArangoDB username
- :rtype: str | unicode
- """
- return self._username
-
- @property
- def password(self):
- """Return the ArangoDB user password.
+ def base_url(self):
+ """Return the ArangoDB base URL.
- :returns: the ArangoDB user password
+ :return: ArangoDB base URL.
:rtype: str | unicode
"""
- return self._password
+ return self._url
- @property
- def http_client(self):
- """Return the HTTP client.
-
- :returns: the HTTP client
- :rtype: arango.http_clients.base.BaseHTTPClient
- """
- return self._http_client
-
- @property
- def logging_enabled(self):
- """Return True if logging is enabled, False otherwise.
-
- :returns: whether logging is enabled
- :rtype: bool
- """
- return self._logging_enabled
-
- @property
- def wal(self):
- """Return the write-ahead log object.
-
- :returns: the write-ahead log object
- :rtype: arango.wal.WriteAheadLog
- """
- return self._wal
-
- def version(self):
- """Return the version of the ArangoDB server.
-
- :returns: the server version
- :rtype: str | unicode
- :raises arango.exceptions.ServerVersionError: if the server version
- cannot be retrieved
-
- .. note::
- Only the root user can access this method. For non-root users,
- use :func:`arango.database.Database.version` (via a database
- the users have access to) instead.
- """
- res = self._conn.get(
- endpoint='/_api/version',
- params={'details': False}
- )
- if res.status_code not in HTTP_OK:
- raise ServerVersionError(res)
- return res.body['version']
-
- def details(self):
- """Return the component details on the ArangoDB server.
-
- :returns: the server details
- :rtype: dict
- :raises arango.exceptions.ServerDetailsError: if the server details
- cannot be retrieved
-
- .. note::
- Only the root user can access this method. For non-root users,
- use :func:`arango.database.Database.details` (via a database
- the users have access to) instead.
- """
- res = self._conn.get(
- endpoint='/_api/version',
- params={'details': True}
- )
- if res.status_code not in HTTP_OK:
- raise ServerDetailsError(res)
- return res.body['details']
-
- def required_db_version(self):
- """Return the required version of the target database.
-
- :returns: the required version of the target database
- :rtype: str | unicode
- :raises arango.exceptions.ServerRequiredDBVersionError: if the
- required database version cannot be retrieved
-
- .. note::
- Only the root user can access this method. For non-root users,
- use :func:`arango.database.Database.required_db_version` (via
- a database the users have access to) instead.
- """
- res = self._conn.get('/_admin/database/target-version')
- if res.status_code not in HTTP_OK:
- raise ServerRequiredDBVersionError(res)
- return res.body['version']
-
- def statistics(self, description=False):
- """Return the server statistics.
-
- :returns: the statistics information
- :rtype: dict
- :raises arango.exceptions.ServerStatisticsError: if the server
- statistics cannot be retrieved
-
- .. note::
- Only the root user can access this method. For non-root users,
- use :func:`arango.database.Database.statistics` (via a database
- the users have access to) instead.
- """
- res = self._conn.get(
- '/_admin/statistics-description'
- if description else '/_admin/statistics'
- )
- if res.status_code not in HTTP_OK:
- raise ServerStatisticsError(res)
- res.body.pop('code', None)
- res.body.pop('error', None)
- return res.body
-
- def role(self):
- """Return the role of the server in the cluster if any.
-
- :returns: the server role which can be ``"SINGLE"`` (the server is not
- in a cluster), ``"COORDINATOR"`` (the server is a coordinator in
- the cluster), ``"PRIMARY"`` (the server is a primary database in
- the cluster), ``"SECONDARY"`` (the server is a secondary database
- in the cluster) or ``"UNDEFINED"`` (the server role is undefined,
- the only possible value for a single server)
- :rtype: str | unicode
- :raises arango.exceptions.ServerRoleError: if the server role cannot
- be retrieved
-
- .. note::
- Only the root user can access this method. For non-root users,
- use :func:`arango.database.Database.role` (via a database the
- users have access to) instead.
- """
- res = self._conn.get('/_admin/server/role')
- if res.status_code not in HTTP_OK:
- raise ServerRoleError(res)
- return res.body.get('role')
-
- def time(self):
- """Return the current server system time.
-
- :returns: the server system time
- :rtype: datetime.datetime
- :raises arango.exceptions.ServerTimeError: if the server time
- cannot be retrieved
-
- .. note::
- Only the root user can access this method. For non-root users,
- use :func:`arango.database.Database.time` (via a database the
- users have access to) instead.
- """
- res = self._conn.get('/_admin/time')
- if res.status_code not in HTTP_OK:
- raise ServerTimeError(res)
- return datetime.fromtimestamp(res.body['time'])
-
- def endpoints(self):
- """Return the list of the endpoints the server is listening on.
-
- Each endpoint is mapped to a list of databases. If the list is empty,
- it means all databases can be accessed via the endpoint. If the list
- contains more than one database, the first database receives all the
- requests by default, unless the name is explicitly specified.
-
- :returns: the list of endpoints
- :rtype: list
- :raises arango.exceptions.ServerEndpointsError: if the endpoints
- cannot be retrieved from the server
-
- .. note::
- Only the root user can access this method.
- """
- res = self._conn.get('/_api/endpoint')
- if res.status_code not in HTTP_OK:
- raise ServerEndpointsError(res)
- return res.body
-
- def echo(self):
- """Return information on the last request (headers, payload etc.)
-
- :returns: the details of the last request
- :rtype: dict
- :raises arango.exceptions.ServerEchoError: if the last request cannot
- be retrieved from the server
-
- .. note::
- Only the root user can access this method. For non-root users,
- use :func:`arango.database.Database.echo` (via a database the
- users have access to) instead.
- """
- res = self._conn.get('/_admin/echo')
- if res.status_code not in HTTP_OK:
- raise ServerEchoError(res)
- return res.body
-
- def sleep(self, seconds):
- """Suspend the execution for a specified duration before returning.
-
- :param seconds: the number of seconds to suspend
- :type seconds: int
- :returns: the number of seconds suspended
- :rtype: int
- :raises arango.exceptions.ServerSleepError: if the server cannot be
- suspended
-
- .. note::
- Only the root user can access this method. For non-root users,
- use :func:`arango.database.Database.sleep` (via a database the
- users have access to) instead.
- """
- res = self._conn.get(
- '/_admin/sleep',
- params={'duration': seconds}
- )
- if res.status_code not in HTTP_OK:
- raise ServerSleepError(res)
- return res.body['duration']
-
- def shutdown(self): # pragma: no cover
- """Initiate the server shutdown sequence.
-
- :returns: whether the server was shutdown successfully
- :rtype: bool
- :raises arango.exceptions.ServerShutdownError: if the server shutdown
- sequence cannot be initiated
-
- .. note::
- Only the root user can access this method. For non-root users,
- use :func:`arango.database.Database.shutdown` (via a database
- the users have access to) instead.
- """
- try:
- res = self._conn.delete('/_admin/shutdown')
- except ConnectionError:
- return False
- if res.status_code not in HTTP_OK:
- raise ServerShutdownError(res)
- return True
-
- def run_tests(self, tests): # pragma: no cover
- """Run the available unittests on the server.
-
- :param tests: list of files containing the test suites
- :type tests: list
- :returns: the test results
- :rtype: dict
- :raises arango.exceptions.ServerRunTestsError: if the test suites fail
-
- .. note::
- Only the root user can access this method. For non-root users,
- use :func:`arango.database.Database.run_tests` (via a database
- the users have access to) instead.
- """
- res = self._conn.post('/_admin/test', data={'tests': tests})
- if res.status_code not in HTTP_OK:
- raise ServerRunTestsError(res)
- return res.body
-
- def execute(self, program): # pragma: no cover
- """Execute a Javascript program on the server.
-
- :param program: the body of the Javascript program to execute.
- :type program: str | unicode
- :returns: the result of the execution
- :rtype: str | unicode
- :raises arango.exceptions.ServerExecuteError: if the program cannot
- be executed on the server
-
- .. note::
- Only the root user can access this method. For non-root users,
- use :func:`arango.database.Database.execute` (via a database
- the users have access to) instead.
- """
- res = self._conn.post('/_admin/execute', data=program)
- if res.status_code not in HTTP_OK:
- raise ServerExecuteError(res)
- return res.body
-
- def read_log(self,
- upto=None,
- level=None,
- start=None,
- size=None,
- offset=None,
- search=None,
- sort=None):
- """Read the global log from the server.
-
- :param upto: return the log entries up to the given level (mutually
- exclusive with argument **level**), which must be ``"fatal"``,
- ``"error"``, ``"warning"``, ``"info"`` (default) or ``"debug"``
- :type upto: str | unicode | int
- :param level: return the log entries of only the given level (mutually
- exclusive with **upto**), which must be ``"fatal"``, ``"error"``,
- ``"warning"``, ``"info"`` (default) or ``"debug"``
- :type level: str | unicode | int
- :param start: return the log entries whose ID is greater than or
- equal to the given value
- :type start: int
- :param size: restrict the size of the result to the given value (this
- setting can be used for pagination)
- :type size: int
- :param offset: the number of entries to skip initially (this setting
- can be used for pagination)
- :type offset: int
- :param search: return only the log entries containing the given text
- :type search: str | unicode
- :param sort: sort the log entries according to the given fashion, which
- can be ``"sort"`` or ``"desc"``
- :type sort: str | unicode
- :returns: the server log entries
- :rtype: dict
- :raises arango.exceptions.ServerReadLogError: if the server log entries
- cannot be read
-
- .. note::
- Only the root user can access this method. For non-root users,
- use :func:`arango.database.Database.read_log` (via a database
- the users have access to) instead.
- """
- params = dict()
- if upto is not None:
- params['upto'] = upto
- if level is not None:
- params['level'] = level
- if start is not None:
- params['start'] = start
- if size is not None:
- params['size'] = size
- if offset is not None:
- params['offset'] = offset
- if search is not None:
- params['search'] = search
- if sort is not None:
- params['sort'] = sort
- res = self._conn.get('/_admin/log', params=params)
- if res.status_code not in HTTP_OK:
- raise ServerReadLogError(res)
- if 'totalAmount' in res.body:
- res.body['total_amount'] = res.body.pop('totalAmount')
- return res.body
-
- def log_levels(self):
- """Return the current logging levels.
-
- .. note::
- This method is only compatible with ArangoDB version 3.1+.
-
- :return: the current logging levels
- :rtype: dict
-
- .. note::
- Only the root user can access this method. For non-root users,
- use :func:`arango.database.Database.log_levels` (via a database
- the users have access to) instead.
- """
- res = self._conn.get('/_admin/log/level')
- if res.status_code not in HTTP_OK:
- raise ServerLogLevelError(res)
- return res.body
-
- def set_log_levels(self, **kwargs):
- """Set the logging levels.
-
- This method takes arbitrary keyword arguments where the keys are the
- logger names and the values are the logging levels. For example:
-
- .. code-block:: python
-
- arango.set_log_levels(
- agency='DEBUG',
- collector='INFO',
- threads='WARNING'
- )
-
- :return: the new logging levels
- :rtype: dict
-
- .. note::
- Keys that are not valid logger names are simply ignored.
-
- .. note::
- This method is only compatible with ArangoDB version 3.1+.
-
- .. note::
- Only the root user can access this method. For non-root users,
- use :func:`arango.database.Database.set_log_levels` (via a
- database the users have access to) instead.
- """
- res = self._conn.put('/_admin/log/level', data=kwargs)
- if res.status_code not in HTTP_OK:
- raise ServerLogLevelSetError(res)
- return res.body
+ def db(self, name='_system', username='root', password='', verify=False):
+ """Connect to a database and return the database API wrapper.
- def reload_routing(self):
- """Reload the routing information from the collection *routing*.
-
- :returns: whether the routing was reloaded successfully
- :rtype: bool
- :raises arango.exceptions.ServerReloadRoutingError: if the routing
- cannot be reloaded
-
- .. note::
- Only the root user can access this method. For non-root users,
- use :func:`arango.database.Database.reload_routing` (via a
- database the users have access to) instead.
- """
- res = self._conn.post('/_admin/routing/reload')
- if res.status_code not in HTTP_OK:
- raise ServerReloadRoutingError(res)
- return not res.body['error']
-
- #######################
- # Database Management #
- #######################
-
- def databases(self, user_only=False):
- """Return the database names.
-
- :param user_only: list only the databases accessible by the user
- :type user_only: bool
- :returns: the database names
- :rtype: list
- :raises arango.exceptions.DatabaseListError: if the retrieval fails
-
- .. note::
- Only the root user can access this method.
- """
- # Get all databases, or only those accessible by the current user
- res = self._conn.get(
- '/_api/database/user'
- if user_only else '/_api/database'
- )
- if res.status_code not in HTTP_OK:
- raise DatabaseListError(res)
- return res.body['result']
-
- def db(self, name, username=None, password=None):
- """Return the database object.
-
- This is an alias for :func:`arango.client.ArangoClient.database`.
-
- :param name: the name of the database
- :type name: str | unicode
- :param username: the username for authentication (if set, overrides
- the username specified during the client initialization)
- :type username: str | unicode
- :param password: the password for authentication (if set, overrides
- the password specified during the client initialization)
- :type password: str | unicode
- :returns: the database object
- :rtype: arango.database.Database
- """
- return self.database(name, username, password)
-
- def database(self, name, username=None, password=None):
- """Return the database object.
-
- :param name: the name of the database
- :type name: str | unicode
- :param username: the username for authentication (if set, overrides
- the username specified during the client initialization)
- :type username: str | unicode
- :param password: the password for authentication (if set, overrides
- the password specified during the client initialization)
- :type password: str | unicode
- :returns: the database object
- :rtype: arango.database.Database
- """
- return Database(Connection(
- protocol=self._protocol,
- host=self._host,
- port=self._port,
- database=name,
- username=username or self._username,
- password=password or self._password,
- http_client=self._http_client,
- enable_logging=self._logging_enabled
- ))
-
- def create_database(self, name, users=None, username=None, password=None):
- """Create a new database.
-
- :param name: the name of the new database
- :type name: str | unicode
- :param users: the list of users with access to the new database, where
- each user is a dictionary with keys ``"username"``, ``"password"``,
- ``"active"`` and ``"extra"``.
- :type users: [dict]
- :param username: the username for authentication (if set, overrides
- the username specified during the client initialization)
- :type username: str | unicode
- :param password: the password for authentication (if set, overrides
- the password specified during the client initialization)
- :type password: str | unicode
- :returns: the database object
- :rtype: arango.database.Database
- :raises arango.exceptions.DatabaseCreateError: if the create fails
-
- .. note::
- Here is an example entry in **users**:
-
- .. code-block:: python
-
- {
- 'username': 'john',
- 'password': 'password',
- 'active': True,
- 'extra': {'Department': 'IT'}
- }
-
- If **users** is not set, only the root and the current user are
- granted access to the new database by default.
-
- .. note::
- Root privileges (i.e. access to the ``_system`` database) are
- required to use this method.
- """
- res = self._conn.post(
- '/_api/database',
- data={
- 'name': name,
- 'users': [{
- 'username': user['username'],
- 'passwd': user['password'],
- 'active': user.get('active', True),
- 'extra': user.get('extra', {})
- } for user in users]
- } if users else {'name': name}
- )
- if res.status_code not in HTTP_OK:
- raise DatabaseCreateError(res)
- return self.db(name, username, password)
-
- def delete_database(self, name, ignore_missing=False):
- """Delete the database of the specified name.
-
- :param name: the name of the database to delete
+ :param name: Database name.
:type name: str | unicode
- :param ignore_missing: ignore missing databases
- :type ignore_missing: bool
- :returns: whether the database was deleted successfully
- :rtype: bool
- :raises arango.exceptions.DatabaseDeleteError: if the delete fails
-
- .. note::
- Root privileges (i.e. access to the ``_system`` database) are
- required to use this method.
- """
- res = self._conn.delete('/_api/database/{}'.format(name))
- if res.status_code not in HTTP_OK:
- if not (res.status_code == 404 and ignore_missing):
- raise DatabaseDeleteError(res)
- return not res.body['error']
-
- ###################
- # User Management #
- ###################
-
- def users(self):
- """Return the details of all users.
-
- :returns: the details of all users
- :rtype: [dict]
- :raises arango.exceptions.UserListError: if the retrieval fails
-
- .. note::
- Only the root user can access this method. For non-root users,
- use :func:`arango.database.Database.users` (via a database the
- users have access to) instead.
- """
- res = self._conn.get('/_api/user')
- if res.status_code not in HTTP_OK:
- raise UserListError(res)
- return [{
- 'username': record['user'],
- 'active': record['active'],
- 'extra': record['extra'],
- } for record in res.body['result']]
-
- def user(self, username):
- """Return the details of a user.
-
- :param username: the name of the user
- :type username: str | unicode
- :returns: the user details
- :rtype: dict
- :raises arango.exceptions.UserGetError: if the retrieval fails
-
- .. note::
- Only the root user can access this method. For non-root users,
- use :func:`arango.database.Database.user` (via a database the
- users have access to) instead.
- """
- res = self._conn.get('/_api/user/{}'.format(username))
- if res.status_code not in HTTP_OK:
- raise UserGetError(res)
- return {
- 'username': res.body['user'],
- 'active': res.body['active'],
- 'extra': res.body['extra']
- }
-
- def create_user(self, username, password, active=None, extra=None):
- """Create a new user.
-
- :param username: the name of the user
+ :param username: Username for basic authentication.
:type username: str | unicode
- :param password: the user's password
+ :param password: Password for basic authentication.
:type password: str | unicode
- :param active: whether the user is active
- :type active: bool
- :param extra: any extra data on the user
- :type extra: dict
- :returns: the details of the new user
- :rtype: dict
- :raises arango.exceptions.UserCreateError: if the user create fails
-
- .. note::
- Only the root user can access this method. For non-root users,
- use :func:`arango.database.Database.create_user` (via a database
- the users have access to) instead.
- """
- data = {'user': username, 'passwd': password}
- if active is not None:
- data['active'] = active
- if extra is not None:
- data['extra'] = extra
-
- res = self._conn.post('/_api/user', data=data)
- if res.status_code not in HTTP_OK:
- raise UserCreateError(res)
- return {
- 'username': res.body['user'],
- 'active': res.body['active'],
- 'extra': res.body['extra'],
- }
-
- def update_user(self, username, password=None, active=None, extra=None):
- """Update an existing user.
-
- :param username: the name of the existing user
- :type username: str | unicode
- :param password: the user's new password
- :type password: str | unicode
- :param active: whether the user is active
- :type active: bool
- :param extra: any extra data on the user
- :type extra: dict
- :returns: the details of the updated user
- :rtype: dict
- :raises arango.exceptions.UserUpdateError: if the user update fails
-
- .. note::
- Only the root user can access this method. For non-root users,
- use :func:`arango.database.Database.update_user` (via a database
- the users have access to) instead.
- """
- data = {}
- if password is not None:
- data['passwd'] = password
- if active is not None:
- data['active'] = active
- if extra is not None:
- data['extra'] = extra
-
- res = self._conn.patch(
- '/_api/user/{user}'.format(user=username),
- data=data
- )
- if res.status_code not in HTTP_OK:
- raise UserUpdateError(res)
- return {
- 'username': res.body['user'],
- 'active': res.body['active'],
- 'extra': res.body['extra'],
- }
-
- def replace_user(self, username, password, active=None, extra=None):
- """Replace an existing user.
-
- :param username: the name of the existing user
- :type username: str | unicode
- :param password: the user's new password
- :type password: str | unicode
- :param active: whether the user is active
- :type active: bool
- :param extra: any extra data on the user
- :type extra: dict
- :returns: the details of the replaced user
- :rtype: dict
- :raises arango.exceptions.UserReplaceError: if the user replace fails
-
- .. note::
- Only the root user can access this method. For non-root users,
- use :func:`arango.database.Database.replace_user` (via a database
- the users have access to) instead.
- """
- data = {'user': username, 'passwd': password}
- if active is not None:
- data['active'] = active
- if extra is not None:
- data['extra'] = extra
-
- res = self._conn.put(
- '/_api/user/{user}'.format(user=username),
- data=data
- )
- if res.status_code not in HTTP_OK:
- raise UserReplaceError(res)
- return {
- 'username': res.body['user'],
- 'active': res.body['active'],
- 'extra': res.body['extra'],
- }
-
- def delete_user(self, username, ignore_missing=False):
- """Delete an existing user.
-
- :param username: the name of the existing user
- :type username: str | unicode
- :param ignore_missing: ignore missing users
- :type ignore_missing: bool
- :returns: ``True`` if the operation was successful, ``False`` if the
- user was missing but **ignore_missing** was set to ``True``
- :rtype: bool
- :raises arango.exceptions.UserDeleteError: if the user delete fails
-
- .. note::
- Only the root user can access this method. For non-root users,
- use :func:`arango.database.Database.delete_user` (via a database
- the users have access to) instead.
- """
- res = self._conn.delete('/_api/user/{user}'.format(user=username))
- if res.status_code in HTTP_OK:
- return True
- elif res.status_code == 404 and ignore_missing:
- return False
- raise UserDeleteError(res)
-
- def user_access(self, username, full=False):
- """Return a user's access details for databases (and collections).
-
- :param username: The name of the user.
- :type username: str | unicode
- :param full: Return the full set of access levels for all databases and
- collections for the user.
- :type full: bool
- :returns: The names of the databases (and collections) the user has
- access to.
- :rtype: [str] | [unicode]
- :raises arango.exceptions.UserAccessError: If the retrieval fails.
-
- .. note::
- Only the root user can access this method. For non-root users,
- use :func:`arango.database.Database.user_access` (via a database
- the users have access to) instead.
- """
- res = self._conn.get(
- '/_api/user/{}/database'.format(username),
- params={'full': full}
+ :param verify: Verify the connection by sending a test request.
+ :type verify: bool
+ :return: Standard database API wrapper.
+ :rtype: arango.database.StandardDatabase
+ :raise arango.exceptions.ServerConnectionError: If **verify** was set
+ to True and the connection to ArangoDB fails.
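+
+ Example (a minimal sketch; assumes ``client`` is an ArangoClient
+ instance and a database named "test" already exists):
+
+ .. code-block:: python
+
+     # Connect and send a test request up front to fail fast.
+     db = client.db('test', username='root', password='passwd', verify=True)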
+ """
+ connection = Connection(
+ url=self._url,
+ db=name,
+ username=username,
+ password=password,
+ http_client=self._http_client
)
- if res.status_code in HTTP_OK:
- return list(res.body['result'])
- raise UserAccessError(res)
+ database = StandardDatabase(connection)
- def grant_user_access(self, username, database):
- """Grant user access to a database.
+ if verify: # Check the server connection by making a read API call
+ try:
+ database.ping()
+ except ServerConnectionError as err:
+ raise err
+ except Exception as err:
+ raise ServerConnectionError('bad connection: {}'.format(err))
- :param username: The name of the user.
- :type username: str | unicode
- :param database: The name of the database.
- :type database: str | unicode
- :returns: Whether the operation was successful or not.
- :rtype: bool
- :raises arango.exceptions.UserGrantAccessError: If the operation fails.
-
- .. note::
- Only the root user can access this method. For non-root users,
- use :func:`arango.database.Database.grant_user_access` (via a
- database the users have access to) instead.
- """
- res = self._conn.put(
- '/_api/user/{}/database/{}'.format(username, database),
- data={'grant': 'rw'}
- )
- if res.status_code in HTTP_OK:
- return True
- raise UserGrantAccessError(res)
-
- def revoke_user_access(self, username, database):
- """Revoke user access to a database.
-
- :param username: The name of the user.
- :type username: str | unicode
- :param database: The name of the database.
- :type database: str | unicode
- :returns: Whether the operation was successful or not.
- :rtype: bool
- :raises arango.exceptions.UserRevokeAccessError: If the operation fails.
-
- .. note::
- Only the root user can access this method. For non-root users,
- use :func:`arango.database.Database.revoke_user_access` (via a
- database the users have access to) instead.
- """
- res = self._conn.delete(
- '/_api/user/{}/database/{}'.format(username, database)
- )
- if res.status_code in HTTP_OK:
- return True
- raise UserRevokeAccessError(res)
-
- ########################
- # Async Job Management #
- ########################
-
- def async_jobs(self, status, count=None):
- """Return the IDs of asynchronous jobs with the specified status.
-
- :param status: The job status (``"pending"`` or ``"done"``).
- :type status: str | unicode
- :param count: The maximum number of job IDs to return.
- :type count: int
- :returns: The list of job IDs.
- :rtype: [str]
- :raises arango.exceptions.AsyncJobListError: If the retrieval fails.
-
- .. note::
- Only the root user can access this method. For non-root users,
- use :func:`arango.database.Database.async_jobs` (via a database
- the users have access to) instead.
- """
- res = self._conn.get(
- '/_api/job/{}'.format(status),
- params={} if count is None else {'count': count}
- )
- if res.status_code not in HTTP_OK:
- raise AsyncJobListError(res)
- return res.body
-
- def clear_async_jobs(self, threshold=None):
- """Delete asynchronous job results from the server.
-
- :param threshold: If specified, only the job results created prior to
- the threshold (a unix timestamp) are deleted, otherwise *all* job
- results are deleted.
- :type threshold: int
- :returns: Whether the deletion of results was successful.
- :rtype: bool
- :raises arango.exceptions.AsyncJobClearError: If the operation fails.
-
- .. note::
- Async jobs currently queued or running are not stopped.
-
- .. note::
- Only the root user can access this method. For non-root users,
- use :func:`arango.database.Database.clear_async_jobs` (via a
- database the users have access to) instead.
- """
- if threshold is None:
- res = self._conn.delete('/_api/job/all')
- else:
- res = self._conn.delete(
- '/_api/job/expired',
- params={'stamp': threshold}
- )
- if res.status_code in HTTP_OK:
- return True
- raise AsyncJobClearError(res)
+ return database
diff --git a/arango/cluster.py b/arango/cluster.py
deleted file mode 100644
index 58d9806c..00000000
--- a/arango/cluster.py
+++ /dev/null
@@ -1,120 +0,0 @@
-from __future__ import absolute_import, unicode_literals
-
-from arango.aql import AQL
-from arango.collections import Collection
-from arango.connection import Connection
-from arango.exceptions import ClusterTestError
-from arango.graph import Graph
-from arango.utils import HTTP_OK
-
-
-class ClusterTest(Connection):
- """ArangoDB cluster round-trip test for sharding.
-
- :param connection: ArangoDB database connection
- :type connection: arango.connection.Connection
- :param shard_id: the ID of the shard to which the request is sent
- :type shard_id: str | unicode
- :param transaction_id: the transaction ID for the request
- :type transaction_id: str | unicode
- :param timeout: the timeout in seconds for the cluster operation, where
- an error is returned if the response does not arrive within the given
- limit (default: 24 hrs)
- :type timeout: int
- :param sync: if set to ``True``, the test uses synchronous mode, otherwise
- asynchronous mode is used (this is mainly for debugging purposes)
- :type sync: bool
- """
-
- def __init__(self,
- connection,
- shard_id,
- transaction_id=None,
- timeout=None,
- sync=None):
- super(ClusterTest, self).__init__(
- protocol=connection.protocol,
- host=connection.host,
- port=connection.port,
- username=connection.username,
- password=connection.password,
- http_client=connection.http_client,
- database=connection.database,
- enable_logging=connection.logging_enabled
- )
- self._shard_id = shard_id
- self._trans_id = transaction_id
- self._timeout = timeout
- self._sync = sync
- self._aql = AQL(self)
- self._type = 'cluster'
-
- def __repr__(self):
- return '<ArangoDB cluster round-trip test>'
-
- def handle_request(self, request, handler):
- """Handle the incoming request and response handler.
-
- :param request: the API request to be placed in the server-side queue
- :type request: arango.request.Request
- :param handler: the response handler
- :type handler: callable
- :returns: the test results
- :rtype: dict
- :raises arango.exceptions.ClusterTestError: if the cluster round-trip
- test cannot be executed
- """
- request.headers['X-Shard-ID'] = str(self._shard_id)
- if self._trans_id is not None:
- request.headers['X-Client-Transaction-ID'] = str(self._trans_id)
- if self._timeout is not None:
- request.headers['X-Timeout'] = str(self._timeout)
- if self._sync is True:
- request.headers['X-Synchronous-Mode'] = 'true'
-
- request.endpoint = '/_admin/cluster-test' + request.endpoint + '11'
- res = getattr(self, request.method)(**request.kwargs)
- if res.status_code not in HTTP_OK:
- raise ClusterTestError(res)
- return res.body # pragma: no cover
-
- @property
- def aql(self):
- """Return the AQL object tailored for asynchronous execution.
-
- API requests via the returned query object are placed in a server-side
- in-memory task queue and executed asynchronously in a fire-and-forget
- style.
-
- :returns: ArangoDB query object
- :rtype: arango.query.AQL
- """
- return self._aql
-
- def collection(self, name):
- """Return a collection object tailored for asynchronous execution.
-
- API requests via the returned collection object are placed in a
- server-side in-memory task queue and executed asynchronously in
- a fire-and-forget style.
-
- :param name: the name of the collection
- :type name: str | unicode
- :returns: the collection object
- :rtype: arango.collections.Collection
- """
- return Collection(self, name)
-
- def graph(self, name):
- """Return a graph object tailored for asynchronous execution.
-
- API requests via the returned graph object are placed in a server-side
- in-memory task queue and executed asynchronously in a fire-and-forget
- style.
-
- :param name: the name of the graph
- :type name: str | unicode
- :returns: the graph object
- :rtype: arango.graph.Graph
- """
- return Graph(self, name)
diff --git a/arango/collection.py b/arango/collection.py
new file mode 100644
index 00000000..05f1e713
--- /dev/null
+++ b/arango/collection.py
@@ -0,0 +1,3048 @@
+from __future__ import absolute_import, unicode_literals
+
+from arango.utils import get_id
+
+__all__ = ['StandardCollection', 'VertexCollection', 'EdgeCollection']
+
+from json import dumps
+
+from arango.api import APIWrapper
+from arango.cursor import Cursor
+from arango.exceptions import (
+ CollectionChecksumError,
+ CollectionConfigureError,
+ CollectionLoadError,
+ CollectionPropertiesError,
+ CollectionRenameError,
+ CollectionRevisionError,
+ CollectionRotateJournalError,
+ CollectionStatisticsError,
+ CollectionTruncateError,
+ CollectionUnloadError,
+ DocumentCountError,
+ DocumentInError,
+ DocumentDeleteError,
+ DocumentGetError,
+ DocumentKeysError,
+ DocumentIDsError,
+ DocumentInsertError,
+ DocumentParseError,
+ DocumentReplaceError,
+ DocumentRevisionError,
+ DocumentUpdateError,
+ EdgeListError,
+ IndexCreateError,
+ IndexDeleteError,
+ IndexListError,
+ IndexLoadError,
+)
+from arango.request import Request
+from arango.response import Response
+
+
+class Collection(APIWrapper):
+ """Base class for collection API wrappers.
+
+ :param connection: HTTP connection.
+ :type connection: arango.connection.Connection
+ :param executor: API executor.
+ :type executor: arango.executor.Executor
+ :param name: Collection name.
+ :type name: str | unicode
+ """
+
+ types = {
+ 2: 'document',
+ 3: 'edge'
+ }
+
+ statuses = {
+ 1: 'new',
+ 2: 'unloaded',
+ 3: 'loaded',
+ 4: 'unloading',
+ 5: 'deleted',
+ 6: 'loading'
+ }
+
+ def __init__(self, connection, executor, name):
+ super(Collection, self).__init__(connection, executor)
+ self._name = name
+ self._id_prefix = name + '/'
+
+ def __iter__(self):
+ return self.all()
+
+ def __len__(self):
+ return self.count()
+
+ def __contains__(self, document):
+ return self.has(document, check_rev=False)
+
+ def _get_status_text(self, code): # pragma: no cover
+ """Return the collection status text.
+
+ :param code: Collection status code.
+ :type code: int
+ :return: Collection status text or None if code is None.
+ :rtype: str | unicode
+ """
+ return None if code is None else self.statuses[code]
+
+ def _format_properties(self, body): # pragma: no cover
+ """Format the collection properties.
+
+ :param body: Response body.
+ :type body: dict
+ :return: Formatted body.
+ :rtype: dict
+ """
+ body.pop('code', None)
+ body.pop('error', None)
+
+ if 'name' not in body:
+ body['name'] = self.name
+ if 'isSystem' in body:
+ body['system'] = body.pop('isSystem')
+ if 'type' in body:
+ body['edge'] = body.pop('type') == 3
+ if 'waitForSync' in body:
+ body['sync'] = body.pop('waitForSync')
+ if 'statusString' in body:
+ body['status'] = body.pop('statusString')
+ elif 'status' in body:
+ body['status'] = self._get_status_text(body['status'])
+ if 'globallyUniqueId' in body:
+ body['global_id'] = body.pop('globallyUniqueId')
+ if 'objectId' in body:
+ body['id'] = body.pop('objectId')
+ if 'cacheEnabled' in body:
+ body['cache'] = body.pop('cacheEnabled')
+ if 'doCompact' in body:
+ body['compact'] = body.pop('doCompact')
+ if 'isVolatile' in body:
+ body['volatile'] = body.pop('isVolatile')
+ if 'shardKeys' in body:
+ body['shard_fields'] = body.pop('shardKeys')
+ if 'replicationFactor' in body:
+ body['replication_factor'] = body.pop('replicationFactor')
+ if 'isSmart' in body:
+ body['smart'] = body.pop('isSmart')
+ if 'indexBuckets' in body:
+ body['index_bucket_count'] = body.pop('indexBuckets')
+ if 'journalSize' in body:
+ body['journal_size'] = body.pop('journalSize')
+ if 'numberOfShards' in body:
+ body['shard_count'] = body.pop('numberOfShards')
+
+ key_options = body.pop('keyOptions', {})
+ if 'type' in key_options:
+ body['key_generator'] = key_options['type']
+ if 'increment' in key_options:
+ body['key_increment'] = key_options['increment']
+ if 'offset' in key_options:
+ body['key_offset'] = key_options['offset']
+ if 'allowUserKeys' in key_options:
+ body['user_keys'] = key_options['allowUserKeys']
+ if 'lastValue' in key_options:
+ body['key_last_value'] = key_options['lastValue']
+ return body
+
+ def _validate_id(self, doc_id):
+ """Check the collection name in the document ID.
+
+ :param doc_id: Document ID.
+ :type doc_id: str | unicode
+ :return: Verified document ID.
+ :rtype: str | unicode
+ :raise arango.exceptions.DocumentParseError: On bad collection name.
+ """
+ if not doc_id.startswith(self._id_prefix):
+ raise DocumentParseError(
+ 'bad collection name in document ID "{}"'.format(doc_id))
+ return doc_id
+
+ def _extract_id(self, body):
+ """Extract the document ID from document body.
+
+ :param body: Document body.
+ :type body: dict
+ :return: Document ID.
+ :rtype: str | unicode
+ :raise arango.exceptions.DocumentParseError: On missing ID and key.
+ """
+ try:
+ if '_id' in body:
+ return self._validate_id(body['_id'])
+ else:
+ return self._id_prefix + body['_key']
+ except KeyError:
+ raise DocumentParseError('field "_key" or "_id" required')
+
+ def _prep_from_body(self, document, check_rev):
+ """Prepare document ID and request headers.
+
+ :param document: Document body.
+ :type document: dict
+ :param check_rev: Whether to check the revision.
+ :type check_rev: bool
+ :return: Document ID and request headers.
+ :rtype: (str | unicode, dict)
+ """
+ doc_id = self._extract_id(document)
+ if not check_rev or '_rev' not in document:
+ return doc_id, {}
+ return doc_id, {'If-Match': document['_rev']}
+
+ def _prep_from_doc(self, document, rev, check_rev):
+ """Prepare document ID, body and request headers.
+
+ :param document: Document ID, key or body.
+ :type document: str | unicode | dict
+ :param rev: Document revision or None.
+ :type rev: str | unicode | None
+ :param check_rev: Whether to check the revision.
+ :type check_rev: bool
+ :return: Document ID, body and request headers.
+ :rtype: (str | unicode, str | unicode | dict, dict)
+ """
+ if isinstance(document, dict):
+ doc_id = self._extract_id(document)
+ rev = rev or document.get('_rev')
+
+ if not check_rev or rev is None:
+ return doc_id, doc_id, {}
+ elif self._is_transaction:
+ body = document.copy()
+ body['_rev'] = rev
+ return doc_id, body, {'If-Match': rev}
+ else:
+ return doc_id, doc_id, {'If-Match': rev}
+ else:
+ if '/' in document:
+ doc_id = self._validate_id(document)
+ else:
+ doc_id = self._id_prefix + document
+
+ if not check_rev or rev is None:
+ return doc_id, doc_id, {}
+ elif self._is_transaction:
+ body = {'_id': doc_id, '_rev': rev}
+ return doc_id, body, {'If-Match': rev}
+ else:
+ return doc_id, doc_id, {'If-Match': rev}
+
+ def _ensure_key_in_body(self, body):
+ """Return the document body with "_key" field populated.
+
+ :param body: Document body.
+ :type body: dict
+ :return: Document body with "_key" field.
+ :rtype: dict
+ :raise arango.exceptions.DocumentParseError: On missing ID and key.
+ """
+ if '_key' in body:
+ return body
+ elif '_id' in body:
+ doc_id = self._validate_id(body['_id'])
+ body = body.copy()
+ body['_key'] = doc_id[len(self._id_prefix):]
+ return body
+ raise DocumentParseError('field "_key" or "_id" required')
+
+ def _ensure_key_from_id(self, body):
+ """Return the body with "_key" field if it has "_id" field.
+
+ :param body: Document body.
+ :type body: dict
+ :return: Document body with "_key" field if it has "_id" field.
+ :rtype: dict
+ """
+ if '_id' in body and '_key' not in body:
+ doc_id = self._validate_id(body['_id'])
+ body = body.copy()
+ body['_key'] = doc_id[len(self._id_prefix):]
+ return body
+
+ @property
+ def name(self):
+ """Return collection name.
+
+ :return: Collection name.
+ :rtype: str | unicode
+ """
+ return self._name
+
+ def rename(self, new_name):
+ """Rename the collection.
+
+ Renames may not be reflected immediately in async execution, batch
+ execution or transactions. It is recommended to initialize new API
+ wrappers after a rename.
+
+ :param new_name: New collection name.
+ :type new_name: str | unicode
+ :return: True if collection was renamed successfully.
+ :rtype: bool
+ :raise arango.exceptions.CollectionRenameError: If rename fails.
+ """
+ request = Request(
+ method='put',
+ endpoint='/_api/collection/{}/rename'.format(self.name),
+ data={'name': new_name}
+ )
+
+ def response_handler(resp):
+ if not resp.is_success:
+ raise CollectionRenameError(resp, request)
+ self._name = new_name
+ self._id_prefix = new_name + '/'
+ return True
+
+ return self._execute(request, response_handler)
+
+ def properties(self):
+ """Return collection properties.
+
+ :return: Collection properties.
+ :rtype: dict
+ :raise arango.exceptions.CollectionPropertiesError: If retrieval fails.
+ """
+ request = Request(
+ method='get',
+ endpoint='/_api/collection/{}/properties'.format(self.name),
+ command='db.{}.properties()'.format(self.name),
+ read=self.name
+ )
+
+ def response_handler(resp):
+ if not resp.is_success:
+ raise CollectionPropertiesError(resp, request)
+ return self._format_properties(resp.body)
+
+ return self._execute(request, response_handler)
+
+ def configure(self, sync=None, journal_size=None):
+ """Configure collection properties.
+
+ :param sync: Block until operations are synchronized to disk.
+ :type sync: bool
+ :param journal_size: Journal size in bytes.
+ :type journal_size: int
+ :return: New collection properties.
+ :rtype: dict
+ :raise arango.exceptions.CollectionConfigureError: If operation fails.
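+
+ Example (a sketch; assumes ``students`` is this collection's API
+ wrapper, and the values shown are arbitrary):
+
+ .. code-block:: python
+
+     # Enable synchronous writes and set a new journal size.
+     new_props = students.configure(sync=True, journal_size=10000000)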
+ """
+ data = {}
+ if sync is not None:
+ data['waitForSync'] = sync
+ if journal_size is not None:
+ data['journalSize'] = journal_size
+
+ request = Request(
+ method='put',
+ endpoint='/_api/collection/{}/properties'.format(self.name),
+ data=data
+ )
+
+ def response_handler(resp):
+ if not resp.is_success:
+ raise CollectionConfigureError(resp, request)
+ return self._format_properties(resp.body)
+
+ return self._execute(request, response_handler)
+
+ def statistics(self):
+ """Return collection statistics.
+
+ :return: Collection statistics.
+ :rtype: dict
+ :raise arango.exceptions.CollectionStatisticsError: If retrieval fails.
+ """
+ request = Request(
+ method='get',
+ endpoint='/_api/collection/{}/figures'.format(self.name),
+ command='db.{}.figures()'.format(self.name),
+ read=self.name
+ )
+
+ def response_handler(resp):
+ if not resp.is_success:
+ raise CollectionStatisticsError(resp, request)
+
+ stats = resp.body.get('figures', resp.body)
+ for field in ['compactors', 'datafiles', 'journals']:
+ if field in stats and 'fileSize' in stats[field]:
+ stats[field]['file_size'] = stats[field].pop('fileSize')
+ if 'compactionStatus' in stats:
+ status = stats.pop('compactionStatus')
+ if 'bytesRead' in status:
+ status['bytes_read'] = status.pop('bytesRead')
+ if 'bytesWritten' in status:
+ status['bytes_written'] = status.pop('bytesWritten')
+ if 'filesCombined' in status:
+ status['files_combined'] = status.pop('filesCombined')
+ stats['compaction_status'] = status
+ if 'documentReferences' in stats:
+ stats['document_refs'] = stats.pop('documentReferences')
+ if 'lastTick' in stats:
+ stats['last_tick'] = stats.pop('lastTick')
+ if 'waitingFor' in stats:
+ stats['waiting_for'] = stats.pop('waitingFor')
+ if 'documentsSize' in stats: # pragma: no cover
+ stats['documents_size'] = stats.pop('documentsSize')
+ if 'uncollectedLogfileEntries' in stats:
+ stats['uncollected_logfile_entries'] = \
+ stats.pop('uncollectedLogfileEntries')
+ return stats
+
+ return self._execute(request, response_handler)
+
+ def revision(self):
+ """Return collection revision.
+
+ :return: Collection revision.
+ :rtype: str | unicode
+ :raise arango.exceptions.CollectionRevisionError: If retrieval fails.
+ """
+ request = Request(
+ method='get',
+ endpoint='/_api/collection/{}/revision'.format(self.name),
+ command='db.{}.revision()'.format(self.name),
+ read=self.name
+ )
+
+ def response_handler(resp):
+ if not resp.is_success:
+ raise CollectionRevisionError(resp, request)
+ if self._is_transaction:
+ return str(resp.body)
+ return resp.body['revision']
+
+ return self._execute(request, response_handler)
+
+ def checksum(self, with_rev=False, with_data=False):
+ """Return collection checksum.
+
+ :param with_rev: Include document revisions in checksum calculation.
+ :type with_rev: bool
+ :param with_data: Include document data in checksum calculation.
+ :type with_data: bool
+ :return: Collection checksum.
+ :rtype: str | unicode
+ :raise arango.exceptions.CollectionChecksumError: If retrieval fails.
+ """
+ request = Request(
+ method='get',
+ endpoint='/_api/collection/{}/checksum'.format(self.name),
+ params={'withRevision': with_rev, 'withData': with_data},
+ command='db.{}.checksum()'.format(self.name),
+ read=self.name
+ )
+
+ def response_handler(resp):
+ if not resp.is_success:
+ raise CollectionChecksumError(resp, request)
+ return resp.body['checksum']
+
+ return self._execute(request, response_handler)
+
+ def load(self):
+ """Load the collection into memory.
+
+ :return: True if collection was loaded successfully.
+ :rtype: bool
+ :raise arango.exceptions.CollectionLoadError: If operation fails.
+ """
+ request = Request(
+ method='put',
+ endpoint='/_api/collection/{}/load'.format(self.name)
+ )
+
+ def response_handler(resp):
+ if not resp.is_success:
+ raise CollectionLoadError(resp, request)
+ return True
+
+ return self._execute(request, response_handler)
+
+ def unload(self):
+ """Unload the collection from memory.
+
+ :return: True if collection was unloaded successfully.
+ :rtype: bool
+ :raise arango.exceptions.CollectionUnloadError: If operation fails.
+ """
+ request = Request(
+ method='put',
+ endpoint='/_api/collection/{}/unload'.format(self.name)
+ )
+
+ def response_handler(resp):
+ if not resp.is_success:
+ raise CollectionUnloadError(resp, request)
+ return True
+
+ return self._execute(request, response_handler)
+
+ def rotate(self):
+ """Rotate the collection journal.
+
+ :return: True if collection journal was rotated successfully.
+ :rtype: bool
+ :raise arango.exceptions.CollectionRotateJournalError: If rotate fails.
+ """
+ request = Request(
+ method='put',
+ endpoint='/_api/collection/{}/rotate'.format(self.name),
+ )
+
+ def response_handler(resp):
+ if not resp.is_success:
+ raise CollectionRotateJournalError(resp, request)
+ return True # pragma: no cover
+
+ return self._execute(request, response_handler)
+
+ def truncate(self):
+ """Delete all documents in the collection.
+
+ :return: True if collection was truncated successfully.
+ :rtype: bool
+ :raise arango.exceptions.CollectionTruncateError: If operation fails.
+ """
+ request = Request(
+ method='put',
+ endpoint='/_api/collection/{}/truncate'.format(self.name),
+ command='db.{}.truncate()'.format(self.name),
+ write=self.name
+ )
+
+ def response_handler(resp):
+ if not resp.is_success:
+ raise CollectionTruncateError(resp, request)
+ return True
+
+ return self._execute(request, response_handler)
+
+ def count(self):
+ """Return the total document count.
+
+ :return: Total document count.
+ :rtype: int
+ :raise arango.exceptions.DocumentCountError: If retrieval fails.
+ """
+ request = Request(
+ method='get',
+ endpoint='/_api/collection/{}/count'.format(self.name),
+ command='db.{}.count()'.format(self.name),
+ read=self.name
+ )
+
+ def response_handler(resp):
+ if not resp.is_success:
+ raise DocumentCountError(resp, request)
+ if self._is_transaction:
+ return resp.body
+ return resp.body['count']
+
+ return self._execute(request, response_handler)
+
+ def has(self, document, rev=None, check_rev=True):
+ """Check if a document exists in the collection.
+
+ :param document: Document ID, key or body. Document body must contain
+ the "_id" or "_key" field.
+ :type document: str | unicode | dict
+ :param rev: Expected document revision. Overrides value of "_rev" field
+ in **document** if present.
+ :type rev: str | unicode
+ :param check_rev: If set to True, revision of **document** (if given)
+ is compared against the revision of target document.
+ :type check_rev: bool
+ :return: True if document exists, False otherwise.
+ :rtype: bool
+ :raise arango.exceptions.DocumentInError: If check fails.
+ :raise arango.exceptions.DocumentRevisionError: If revisions mismatch.
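+
+ Example (a sketch; assumes ``students`` is this collection's API
+ wrapper, bound to a collection named "students"):
+
+ .. code-block:: python
+
+     students.has('john')            # By document key.
+     students.has('students/john')   # By document ID.
+     students.has({'_key': 'john'})  # By document body.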
+ """
+ handle, body, headers = self._prep_from_doc(document, rev, check_rev)
+
+ command = 'db.{}.exists({})'.format(
+ self.name,
+ dumps(body)
+ ) if self._is_transaction else None
+
+ request = Request(
+ method='get',
+ endpoint='/_api/document/{}'.format(handle),
+ headers=headers,
+ command=command,
+ read=self.name
+ )
+
+ def response_handler(resp):
+ if resp.error_code == 1202:
+ return False
+ if resp.status_code == 412:
+ raise DocumentRevisionError(resp, request)
+ if not resp.is_success:
+ raise DocumentInError(resp, request)
+ return bool(resp.body)
+
+ return self._execute(request, response_handler)
+
+ def ids(self):
+ """Return the IDs of all documents in the collection.
+
+ :return: Document ID cursor.
+ :rtype: arango.cursor.Cursor
+ :raise arango.exceptions.DocumentIDsError: If retrieval fails.
+ """
+ request = Request(
+ method='put',
+ endpoint='/_api/simple/all-keys',
+ data={'collection': self.name, 'type': 'id'},
+ command='db.{}.toArray().map(d => d._id)'.format(self.name),
+ read=self.name
+ )
+
+ def response_handler(resp):
+ if not resp.is_success:
+ raise DocumentIDsError(resp, request)
+ return Cursor(self._conn, resp.body)
+
+ return self._execute(request, response_handler)
+
+ def keys(self):
+ """Return the keys of all documents in the collection.
+
+ :return: Document key cursor.
+ :rtype: arango.cursor.Cursor
+ :raise arango.exceptions.DocumentKeysError: If retrieval fails.
+ """
+ request = Request(
+ method='put',
+ endpoint='/_api/simple/all-keys',
+ data={'collection': self.name, 'type': 'key'},
+ command='db.{}.toArray().map(d => d._key)'.format(self.name),
+ read=self.name
+ )
+
+ def response_handler(resp):
+ if not resp.is_success:
+ raise DocumentKeysError(resp, request)
+ return Cursor(self._conn, resp.body)
+
+ return self._execute(request, response_handler)
+
+ def all(self, skip=None, limit=None):
+ """Return all documents in the collection.
+
+ :param skip: Number of documents to skip.
+ :type skip: int
+ :param limit: Max number of documents fetched by the cursor. If not
+ set, all documents are returned. Values 0 or under are ignored.
+ :type limit: int
+ :return: Document cursor.
+ :rtype: arango.cursor.Cursor
+ :raise arango.exceptions.DocumentGetError: If retrieval fails.
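+
+ Example (a sketch; assumes ``students`` is this collection's API
+ wrapper):
+
+ .. code-block:: python
+
+     # Iterate over the returned cursor, skipping the first 10 documents.
+     for doc in students.all(skip=10, limit=100):
+         print(doc['_key'])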
+ """
+ data = {'collection': self.name}
+ if skip is not None:
+ data['skip'] = skip
+ if limit is not None and limit != 0:
+ data['limit'] = limit
+
+ if self._is_transaction:
+ command = 'db.{}.all()'.format(self.name)
+ if skip is not None:
+ command += '.skip({})'.format(skip)
+ if limit is not None:
+ command += '.limit({})'.format(limit)
+ command += '.toArray()'
+ else:
+ command = None
+
+ request = Request(
+ method='put',
+ endpoint='/_api/simple/all',
+ data=data,
+ command=command,
+ read=self.name
+ )
+
+ def response_handler(resp):
+ if not resp.is_success:
+ raise DocumentGetError(resp, request)
+ return Cursor(self._conn, resp.body)
+
+ return self._execute(request, response_handler)
+
+ def export(self,
+ limit=None,
+ count=False,
+ batch_size=None,
+ flush=False,
+ flush_wait=None,
+ ttl=None,
+ filter_fields=None,
+ filter_type='include'): # pragma: no cover
+ """Export all documents in the collection using a server cursor.
+
+ :param flush: If set to True, flush the write-ahead log prior to the
+ export. If set to False, documents in the write-ahead log during
+ the export are not included in the result.
+ :type flush: bool
+ :param flush_wait: Max wait time in seconds for write-ahead log flush.
+ :type flush_wait: int
+ :param count: Include the document count in the server cursor.
+ :type count: bool
+ :param batch_size: Max number of documents in the batch fetched by
+ the cursor in one round trip.
+ :type batch_size: int
+ :param limit: Max number of documents fetched by the cursor.
+ :type limit: int
+ :param ttl: Time-to-live for the cursor on the server.
+ :type ttl: int
+ :param filter_fields: Document fields to filter with.
+ :type filter_fields: [str | unicode]
+ :param filter_type: Allowed values are "include" or "exclude".
+ :type filter_type: str | unicode
+ :return: Document cursor.
+ :rtype: arango.cursor.Cursor
+ :raise arango.exceptions.DocumentGetError: If export fails.
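+
+ Example (a sketch; assumes ``students`` is this collection's API
+ wrapper and its documents have a "name" field):
+
+ .. code-block:: python
+
+     # Export up to 100 documents, keeping only the "name" field.
+     cursor = students.export(limit=100, filter_fields=['name'])
+     names = [doc['name'] for doc in cursor]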
+ """
+ data = {'count': count, 'flush': flush}
+ if flush_wait is not None:
+ data['flushWait'] = flush_wait
+ if batch_size is not None:
+ data['batchSize'] = batch_size
+ if limit is not None:
+ data['limit'] = limit
+ if ttl is not None:
+ data['ttl'] = ttl
+ if filter_fields is not None:
+ data['restrict'] = {
+ 'fields': filter_fields,
+ 'type': filter_type
+ }
+ request = Request(
+ method='post',
+ endpoint='/_api/export',
+ params={'collection': self.name},
+ data=data
+ )
+
+ def response_handler(resp):
+ if not resp.is_success:
+ raise DocumentGetError(resp, request)
+ return Cursor(self._conn, resp.body, 'export')
+
+ return self._execute(request, response_handler)
+
+ def find(self, filters, skip=0, limit=100):
+ """Return all documents that match the given filters.
+
+ :param filters: Document filters.
+ :type filters: dict
+ :param skip: Number of documents to skip.
+ :type skip: int
+ :param limit: Max number of documents fetched by the cursor. Default
+ value is 100. Values 0 or under are ignored.
+ :type limit: int
+ :return: Document cursor.
+ :rtype: arango.cursor.Cursor
+ :raise arango.exceptions.DocumentGetError: If retrieval fails.
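+
+ Example (a sketch; assumes ``students`` is this collection's API
+ wrapper and its documents have a "name" field):
+
+ .. code-block:: python
+
+     cursor = students.find({'name': 'John'}, skip=0, limit=10)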
+ """
+ limit = 100 if limit < 1 else limit
+
+ data = {
+ 'collection': self.name,
+ 'example': filters,
+ 'skip': skip,
+ 'limit': limit
+ }
+
+ command = 'db.{}.byExample({}).skip({}).limit({}).toArray()'.format(
+ self.name,
+ dumps(filters),
+ dumps(skip),
+ dumps(limit)
+ ) if self._is_transaction else None
+
+ request = Request(
+ method='put',
+ endpoint='/_api/simple/by-example',
+ data=data,
+ command=command,
+ read=self.name
+ )
+
+ def response_handler(resp):
+ if not resp.is_success:
+ raise DocumentGetError(resp, request)
+ return Cursor(self._conn, resp.body)
+
+ return self._execute(request, response_handler)
+
+ def find_near(self, latitude, longitude, limit=100):
+ """Return documents near a given coordinate.
+
+ Documents returned are sorted according to distance, with the nearest
+ document being the first. If there are documents of equal distance,
+ they are randomly chosen from the set until the limit is reached. A geo
+ index must be defined in the collection to use this method.
+
+ :param latitude: Latitude.
+ :type latitude: int | float
+ :param longitude: Longitude.
+ :type longitude: int | float
+ :param limit: Max number of documents fetched by the cursor. Default
+ value is 100. Values 0 or under are ignored.
+ :type limit: int
+ :return: Document cursor.
+ :rtype: arango.cursor.Cursor
+ :raise arango.exceptions.DocumentGetError: If retrieval fails.
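+
+ Example (a sketch; assumes ``students`` is this collection's API
+ wrapper with a geo index, and the coordinates are arbitrary):
+
+ .. code-block:: python
+
+     cursor = students.find_near(latitude=37.6, longitude=126.9, limit=10)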
+ """
+ limit = 100 if limit < 1 else limit
+
+ query = """
+ FOR doc IN NEAR(@collection, @latitude, @longitude, @limit)
+ RETURN doc
+ """
+
+ bind_vars = {
+ 'collection': self._name,
+ 'latitude': latitude,
+ 'longitude': longitude,
+ 'limit': limit
+ }
+
+ command = 'db.{}.near({},{}).limit({}).toArray()'.format(
+ self.name,
+ dumps(latitude),
+ dumps(longitude),
+ dumps(limit)
+ ) if self._is_transaction else None
+
+ request = Request(
+ method='post',
+ endpoint='/_api/cursor',
+ data={'query': query, 'bindVars': bind_vars, 'count': True},
+ command=command,
+ read=self.name
+ )
+
+ def response_handler(resp):
+ if not resp.is_success:
+ raise DocumentGetError(resp, request)
+ return Cursor(self._conn, resp.body)
+
+ return self._execute(request, response_handler)
+
+ def find_in_range(self,
+ field,
+ lower,
+ upper,
+ skip=0,
+ limit=100):
+ """Return documents within a given range in a random order.
+
+ A skiplist index must be defined in the collection to use this method.
+
+ :param field: Document field name.
+ :type field: str | unicode
+ :param lower: Lower bound (inclusive).
+ :type lower: int
+ :param upper: Upper bound (exclusive).
+ :type upper: int
+ :param skip: Number of documents to skip.
+ :type skip: int
+ :param limit: Max number of documents fetched by the cursor. Default
+ value is 100. Values 0 or under are ignored.
+ :type limit: int
+ :return: Document cursor.
+ :rtype: arango.cursor.Cursor
+ :raise arango.exceptions.DocumentGetError: If retrieval fails.
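+
+ Example (a sketch; assumes ``students`` is this collection's API
+ wrapper with a skiplist index on field "age"):
+
+ .. code-block:: python
+
+     cursor = students.find_in_range('age', lower=20, upper=30, limit=10)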
+ """
+ limit = 100 if limit < 1 else limit
+
+ query = """
+ FOR doc IN @@collection
+ FILTER doc.@field >= @lower && doc.@field < @upper
+ LIMIT @skip, @limit
+ RETURN doc
+ """
+
+ bind_vars = {
+ '@collection': self._name,
+ 'field': field,
+ 'lower': lower,
+ 'upper': upper,
+ 'skip': skip,
+ 'limit': limit
+ }
+
+ command = 'db.{}.range({},{},{}).skip({}).limit({}).toArray()'.format(
+ self.name,
+ dumps(field),
+ dumps(lower),
+ dumps(upper),
+ dumps(skip),
+ dumps(limit)
+ ) if self._is_transaction else None
+
+ request = Request(
+ method='post',
+ endpoint='/_api/cursor',
+ data={'query': query, 'bindVars': bind_vars, 'count': True},
+ command=command,
+ read=self.name
+ )
+
+ def response_handler(resp):
+ if not resp.is_success:
+ raise DocumentGetError(resp, request)
+ return Cursor(self._conn, resp.body)
+
+ return self._execute(request, response_handler)
+
+ def find_in_radius(self, latitude, longitude, radius, distance_field=None):
+ """Return documents within a given radius around a coordinate.
+
+ A geo index must be defined in the collection to use this method.
+
+ :param latitude: Latitude.
+ :type latitude: int | float
+ :param longitude: Longitude.
+ :type longitude: int | float
+ :param radius: Max radius.
+ :type radius: int | float
+ :param distance_field: Document field used to indicate the distance to
+ the given coordinate. This parameter is ignored in transactions.
+ :type distance_field: str | unicode
+ :return: Document cursor.
+ :rtype: arango.cursor.Cursor
+ :raise arango.exceptions.DocumentGetError: If retrieval fails.
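+
+ Example (a sketch; assumes ``students`` is this collection's API
+ wrapper with a geo index, and the coordinates are arbitrary):
+
+ .. code-block:: python
+
+     # Annotate each match with its distance in the "distance" field.
+     cursor = students.find_in_radius(37.6, 126.9, radius=100,
+                                      distance_field='distance')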
+ """
+ query = """
+ FOR doc IN WITHIN(@@collection, @latitude, @longitude, @radius{})
+ RETURN doc
+ """.format('' if distance_field is None else ', @distance')
+
+ bind_vars = {
+ '@collection': self._name,
+ 'latitude': latitude,
+ 'longitude': longitude,
+ 'radius': radius
+ }
+ if distance_field is not None:
+ bind_vars['distance'] = distance_field
+
+ command = 'db.{}.within({},{},{}).toArray()'.format(
+ self.name,
+ dumps(latitude),
+ dumps(longitude),
+ dumps(radius)
+ ) if self._is_transaction else None
+
+ request = Request(
+ method='post',
+ endpoint='/_api/cursor',
+ data={'query': query, 'bindVars': bind_vars, 'count': True},
+ command=command,
+ read=self.name
+ )
+
+ def response_handler(resp):
+ if not resp.is_success:
+ raise DocumentGetError(resp, request)
+ return Cursor(self._conn, resp.body)
+
+ return self._execute(request, response_handler)
+
+ def find_in_box(self,
+ latitude1,
+ longitude1,
+ latitude2,
+ longitude2,
+ skip=0,
+ limit=100,
+ index=None):
+ """Return all documents in an rectangular area.
+
+ :param latitude1: First latitude.
+ :type latitude1: int | float
+ :param longitude1: First longitude.
+ :type longitude1: int | float
+ :param latitude2: Second latitude.
+ :type latitude2: int | float
+        :param longitude2: Second longitude.
+ :type longitude2: int | float
+ :param skip: Number of documents to skip.
+ :type skip: int
+        :param limit: Max number of documents fetched by the cursor. Defaults
+            to 100. Values below 1 are ignored and the default is used.
+        :type limit: int
+        :param index: ID of the geo index to use (without the collection
+            prefix). This parameter is ignored in transactions.
+        :type index: str | unicode
+        :return: Document cursor.
+        :rtype: arango.cursor.Cursor
+        :raise arango.exceptions.DocumentGetError: If retrieval fails.
+ """
+ limit = 100 if limit < 1 else limit
+
+ data = {
+ 'collection': self._name,
+ 'latitude1': latitude1,
+ 'longitude1': longitude1,
+ 'latitude2': latitude2,
+ 'longitude2': longitude2,
+ 'skip': skip,
+ 'limit': limit
+ }
+ if index is not None:
+ data['geo'] = self._name + '/' + index
+
+ command = 'db.{}.{}({},{},{},{}).skip({}).limit({}).toArray()'.format(
+ self.name,
+ 'withinRectangle',
+ dumps(latitude1),
+ dumps(longitude1),
+ dumps(latitude2),
+ dumps(longitude2),
+ dumps(skip),
+ dumps(limit)
+ ) if self._is_transaction else None
+
+ request = Request(
+ method='put',
+ endpoint='/_api/simple/within-rectangle',
+ data=data,
+ command=command,
+ read=self.name
+ )
+
+ def response_handler(resp):
+ if not resp.is_success:
+ raise DocumentGetError(resp, request)
+ return Cursor(self._conn, resp.body)
+
+ return self._execute(request, response_handler)
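+
+    # Illustrative usage sketch: the two coordinate pairs mark opposite
+    # corners of the rectangle (assumed collection wrapper ``cities``):
+    #
+    #   cursor = cities.find_in_box(
+    #       latitude1=36.0, longitude1=-123.0,  # one corner
+    #       latitude2=38.0, longitude2=-121.0,  # opposite corner
+    #       limit=50
+    #   )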
+
+ def find_by_text(self, field, query, limit=100):
+ """Return documents that match the given fulltext query.
+
+ :param field: Document field with fulltext index.
+ :type field: str | unicode
+ :param query: Fulltext query.
+ :type query: str | unicode
+        :param limit: Max number of documents fetched by the cursor. Defaults
+            to 100. Values below 1 are ignored and the default is used.
+        :type limit: int
+        :return: Document cursor.
+        :rtype: arango.cursor.Cursor
+        :raise arango.exceptions.DocumentGetError: If retrieval fails.
+ """
+ limit = 100 if limit < 1 else limit
+
+ aql = """
+ FOR doc IN FULLTEXT(@collection, @field, @query, @limit)
+ RETURN doc
+ """
+
+ bind_vars = {
+ 'collection': self._name,
+ 'field': field,
+ 'query': query,
+ 'limit': limit,
+ }
+
+ command = 'db.{}.fulltext({},{}).limit({}).toArray()'.format(
+ self.name,
+ dumps(field),
+ dumps(query),
+ dumps(limit)
+ ) if self._is_transaction else None
+
+ request = Request(
+ method='post',
+ endpoint='/_api/cursor',
+ data={'query': aql, 'bindVars': bind_vars, 'count': True},
+ command=command,
+ read=self.name
+ )
+
+ def response_handler(resp):
+ if not resp.is_success:
+ raise DocumentGetError(resp, request)
+ return Cursor(self._conn, resp.body)
+
+ return self._execute(request, response_handler)
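+
+    # Illustrative usage sketch (assumed collection wrapper ``posts`` with a
+    # fulltext index on "text"); the query "foo,|bar" matches foo OR bar:
+    #
+    #   cursor = posts.find_by_text(field='text', query='foo,|bar', limit=10)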
+
+ def get_many(self, documents):
+ """Return multiple documents ignoring any missing ones.
+
+ :param documents: List of document keys, IDs or bodies. Document bodies
+ must contain the "_id" or "_key" fields.
+ :type documents: [str | unicode | dict]
+ :return: Documents. Missing ones are not included.
+ :rtype: [dict]
+ :raise arango.exceptions.DocumentGetError: If retrieval fails.
+ """
+ handles = [
+ self._extract_id(doc) if isinstance(doc, dict) else doc
+ for doc in documents
+ ]
+
+ command = 'db.{}.document({})'.format(
+ self.name,
+ dumps(handles)
+ ) if self._is_transaction else None
+
+ request = Request(
+ method='put',
+ endpoint='/_api/simple/lookup-by-keys',
+ data={'collection': self.name, 'keys': handles},
+ command=command,
+ read=self.name
+ )
+
+ def response_handler(resp):
+ if not resp.is_success:
+ raise DocumentGetError(resp, request)
+ if self._is_transaction:
+ docs = resp.body
+ else:
+ docs = resp.body['documents']
+ return [doc for doc in docs if '_id' in doc]
+
+ return self._execute(request, response_handler)
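+
+    # Illustrative usage sketch: keys, full IDs and document bodies may be
+    # mixed in one call; missing documents are simply left out of the result:
+    #
+    #   docs = students.get_many(['john', 'students/jane', {'_key': 'judy'}])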
+
+ def random(self):
+ """Return a random document from the collection.
+
+ :return: A random document.
+ :rtype: dict
+ :raise arango.exceptions.DocumentGetError: If retrieval fails.
+ """
+ request = Request(
+ method='put',
+ endpoint='/_api/simple/any',
+ data={'collection': self.name},
+ command='db.{}.any()'.format(self.name),
+ read=self.name
+ )
+
+ def response_handler(resp):
+ if not resp.is_success:
+ raise DocumentGetError(resp, request)
+ if self._is_transaction:
+ return resp.body
+ return resp.body['document']
+
+ return self._execute(request, response_handler)
+
+ ####################
+ # Index Management #
+ ####################
+
+ def indexes(self):
+ """Return the collection indexes.
+
+ :return: Collection indexes.
+ :rtype: [dict]
+ :raise arango.exceptions.IndexListError: If retrieval fails.
+ """
+ request = Request(
+ method='get',
+ endpoint='/_api/index',
+ params={'collection': self.name},
+ command='db.{}.getIndexes()'.format(self.name),
+ read=self.name
+ )
+
+ def response_handler(resp):
+ if not resp.is_success:
+ raise IndexListError(resp, request)
+ if self._is_transaction:
+ result = resp.body
+ else:
+ result = resp.body['indexes']
+
+ indexes = []
+ for index in result:
+ index['id'] = index['id'].split('/', 1)[-1]
+ if 'minLength' in index:
+ index['min_length'] = index.pop('minLength')
+ if 'geoJson' in index:
+ index['geo_json'] = index.pop('geoJson')
+ if 'ignoreNull' in index:
+ index['ignore_none'] = index.pop('ignoreNull')
+ if 'selectivityEstimate' in index:
+ index['selectivity'] = index.pop('selectivityEstimate')
+ indexes.append(index)
+ return indexes
+
+ return self._execute(request, response_handler)
+
+ def _add_index(self, data):
+ """Helper method for creating a new index.
+
+ :param data: Index data.
+ :type data: dict
+ :return: New index details.
+ :rtype: dict
+ :raise arango.exceptions.IndexCreateError: If create fails.
+ """
+ request = Request(
+ method='post',
+ endpoint='/_api/index',
+ data=data,
+ params={'collection': self.name}
+ )
+
+ def response_handler(resp):
+ if not resp.is_success:
+ raise IndexCreateError(resp, request)
+ details = resp.body
+ details['id'] = details['id'].split('/', 1)[1]
+ details.pop('error', None)
+ details.pop('code', None)
+ if 'minLength' in details:
+ details['min_length'] = details.pop('minLength')
+ if 'geoJson' in details:
+ details['geo_json'] = details.pop('geoJson')
+ if 'ignoreNull' in details:
+ details['ignore_none'] = details.pop('ignoreNull')
+ if 'selectivityEstimate' in details:
+ details['selectivity'] = details.pop('selectivityEstimate')
+ if 'isNewlyCreated' in details:
+ details['new'] = details.pop('isNewlyCreated')
+ return details
+
+ return self._execute(request, response_handler)
+
+ def add_hash_index(self,
+ fields,
+ unique=None,
+ sparse=None,
+ deduplicate=None):
+ """Create a new hash index.
+
+ :param fields: Document fields to index.
+ :type fields: [str | unicode]
+ :param unique: Whether the index is unique.
+ :type unique: bool
+ :param sparse: If set to True, documents with None in the field
+ are also indexed. If set to False, they are skipped.
+ :type sparse: bool
+ :param deduplicate: If set to True, inserting duplicate index values
+ from the same document triggers unique constraint errors.
+ :type deduplicate: bool
+ :return: New index details.
+ :rtype: dict
+ :raise arango.exceptions.IndexCreateError: If create fails.
+ """
+ data = {'type': 'hash', 'fields': fields}
+ if unique is not None:
+ data['unique'] = unique
+ if sparse is not None:
+ data['sparse'] = sparse
+ if deduplicate is not None:
+ data['deduplicate'] = deduplicate
+ return self._add_index(data)
+
+ def add_skiplist_index(self,
+ fields,
+ unique=None,
+ sparse=None,
+ deduplicate=None):
+ """Create a new skiplist index.
+
+ :param fields: Document fields to index.
+ :type fields: [str | unicode]
+ :param unique: Whether the index is unique.
+ :type unique: bool
+ :param sparse: If set to True, documents with None in the field
+ are also indexed. If set to False, they are skipped.
+ :type sparse: bool
+ :param deduplicate: If set to True, inserting duplicate index values
+ from the same document triggers unique constraint errors.
+ :type deduplicate: bool
+ :return: New index details.
+ :rtype: dict
+ :raise arango.exceptions.IndexCreateError: If create fails.
+ """
+ data = {'type': 'skiplist', 'fields': fields}
+ if unique is not None:
+ data['unique'] = unique
+ if sparse is not None:
+ data['sparse'] = sparse
+ if deduplicate is not None:
+ data['deduplicate'] = deduplicate
+ return self._add_index(data)
+
+ def add_geo_index(self, fields, ordered=None):
+ """Create a new geo-spatial index.
+
+ :param fields: A single document field or a list of document fields. If
+ a single field is given, the field must have values that are lists
+ with at least two floats. Documents with missing fields or invalid
+ values are excluded.
+ :type fields: str | unicode | list
+ :param ordered: Whether the order is longitude, then latitude.
+ :type ordered: bool
+ :return: New index details.
+ :rtype: dict
+ :raise arango.exceptions.IndexCreateError: If create fails.
+ """
+ data = {'type': 'geo', 'fields': fields}
+ if ordered is not None:
+ data['geoJson'] = ordered
+ return self._add_index(data)
+
+ def add_fulltext_index(self, fields, min_length=None):
+ """Create a new fulltext index.
+
+ :param fields: Document fields to index.
+ :type fields: [str | unicode]
+ :param min_length: Minimum number of characters to index.
+ :type min_length: int
+ :return: New index details.
+ :rtype: dict
+ :raise arango.exceptions.IndexCreateError: If create fails.
+ """
+ data = {'type': 'fulltext', 'fields': fields}
+ if min_length is not None:
+ data['minLength'] = min_length
+ return self._add_index(data)
+
+ def add_persistent_index(self, fields, unique=None, sparse=None):
+ """Create a new persistent index.
+
+ Unique persistent indexes on non-sharded keys are not supported in a
+ cluster.
+
+ :param fields: Document fields to index.
+ :type fields: [str | unicode]
+ :param unique: Whether the index is unique.
+ :type unique: bool
+ :param sparse: Exclude documents that do not contain at least one of
+ the indexed fields, or documents that have a value of None in any
+ of the indexed fields.
+ :type sparse: bool
+ :return: New index details.
+ :rtype: dict
+ :raise arango.exceptions.IndexCreateError: If create fails.
+ """
+ data = {'type': 'persistent', 'fields': fields}
+ if unique is not None:
+ data['unique'] = unique
+ if sparse is not None:
+ data['sparse'] = sparse
+ return self._add_index(data)
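+
+    # Illustrative usage sketch of index creation (assumed collection wrapper
+    # ``students``); each call returns the new index details as a dict:
+    #
+    #   students.add_hash_index(fields=['name'], unique=True)
+    #   students.add_fulltext_index(fields=['bio'], min_length=3)
+    #   students.add_geo_index(fields=['location'])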
+
+ def delete_index(self, index_id, ignore_missing=False):
+ """Delete an index.
+
+ :param index_id: Index ID.
+ :type index_id: str | unicode
+ :param ignore_missing: Do not raise an exception on missing index.
+ :type ignore_missing: bool
+ :return: True if index was deleted successfully, False if index was
+ not found and **ignore_missing** was set to True.
+ :rtype: bool
+ :raise arango.exceptions.IndexDeleteError: If delete fails.
+ """
+ request = Request(
+ method='delete',
+ endpoint='/_api/index/{}/{}'.format(self.name, index_id)
+ )
+
+ def response_handler(resp):
+ if resp.error_code == 1212 and ignore_missing:
+ return False
+ if not resp.is_success:
+ raise IndexDeleteError(resp, request)
+ return True
+
+ return self._execute(request, response_handler)
+
+ def load_indexes(self):
+ """Cache all indexes in the collection into memory.
+
+        :return: True if indexes were loaded successfully.
+ :rtype: bool
+ :raise arango.exceptions.IndexLoadError: If operation fails.
+ """
+ request = Request(
+ method='put',
+ endpoint='/_api/collection/{}/loadIndexesIntoMemory'.format(
+ self.name
+ )
+ )
+
+ def response_handler(resp):
+ if not resp.is_success:
+ raise IndexLoadError(resp, request)
+ return True
+
+ return self._execute(request, response_handler)
+
+
+class StandardCollection(Collection):
+ """Standard ArangoDB collection API wrapper.
+
+ :param connection: HTTP connection.
+ :type connection: arango.connection.Connection
+ :param executor: API executor.
+ :type executor: arango.executor.Executor
+ :param name: Collection name.
+ :type name: str | unicode
+ """
+
+ def __init__(self, connection, executor, name):
+ super(StandardCollection, self).__init__(connection, executor, name)
+
+ def __repr__(self):
+        return '<StandardCollection {}>'.format(self.name)
+
+ def __getitem__(self, key):
+ return self.get(key)
+
+ def get(self, document, rev=None, check_rev=True):
+ """Return a document.
+
+ :param document: Document ID, key or body. Document body must contain
+ the "_id" or "_key" field.
+ :type document: str | unicode | dict
+ :param rev: Expected document revision. Overrides the value of "_rev"
+ field in **document** if present.
+ :type rev: str | unicode
+ :param check_rev: If set to True, revision of **document** (if given)
+ is compared against the revision of target document.
+ :type check_rev: bool
+ :return: Document, or None if not found.
+ :rtype: dict | None
+ :raise arango.exceptions.DocumentGetError: If retrieval fails.
+ :raise arango.exceptions.DocumentRevisionError: If revisions mismatch.
+ """
+ handle, body, headers = self._prep_from_doc(document, rev, check_rev)
+
+ command = 'db.{}.exists({}) || undefined'.format(
+ self.name,
+ dumps(body)
+ ) if self._is_transaction else None
+
+ request = Request(
+ method='get',
+ endpoint='/_api/document/{}'.format(handle),
+ headers=headers,
+ command=command,
+ read=self.name
+ )
+
+ def response_handler(resp):
+ if resp.error_code == 1202:
+ return None
+ if resp.status_code == 412:
+ raise DocumentRevisionError(resp, request)
+ if not resp.is_success:
+ raise DocumentGetError(resp, request)
+ return resp.body
+
+ return self._execute(request, response_handler)
+
+ def insert(self, document, return_new=False, sync=None, silent=False):
+ """Insert a new document.
+
+ :param document: Document to insert. If it contains the "_key" or "_id"
+ field, the value is used as the key of the new document (otherwise
+ it is auto-generated). Any "_rev" field is ignored.
+ :type document: dict
+ :param return_new: Include body of the new document in the returned
+ metadata. Ignored if parameter **silent** is set to True.
+ :type return_new: bool
+ :param sync: Block until operation is synchronized to disk.
+ :type sync: bool
+ :param silent: If set to True, no document metadata is returned. This
+ can be used to save resources.
+ :type silent: bool
+ :return: Document metadata (e.g. document key, revision) or True if
+ parameter **silent** was set to True.
+ :rtype: bool | dict
+ :raise arango.exceptions.DocumentInsertError: If insert fails.
+ """
+ document = self._ensure_key_from_id(document)
+
+ params = {'returnNew': return_new, 'silent': silent}
+ if sync is not None:
+ params['waitForSync'] = sync
+
+ command = 'db.{}.insert({},{})'.format(
+ self.name,
+ dumps(document),
+ dumps(params)
+ ) if self._is_transaction else None
+
+ request = Request(
+ method='post',
+ endpoint='/_api/document/{}'.format(self.name),
+ data=document,
+ params=params,
+ command=command,
+ write=self.name
+ )
+
+ def response_handler(resp):
+ if not resp.is_success:
+ raise DocumentInsertError(resp, request)
+ return True if silent else resp.body
+
+ return self._execute(request, response_handler)
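+
+    # Illustrative usage sketch: the returned metadata carries "_id", "_key"
+    # and "_rev" unless **silent** is set (assumed wrapper ``students``):
+    #
+    #   meta = students.insert({'_key': 'john', 'age': 30}, return_new=True)
+    #   assert meta['_key'] == 'john'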
+
+ def insert_many(self,
+ documents,
+ return_new=False,
+ sync=None,
+ silent=False):
+ """Insert multiple documents.
+
+ If inserting a document fails, the exception object is placed in the
+ result list instead of document metadata.
+
+ :param documents: List of new documents to insert. If they contain the
+ "_key" or "_id" fields, the values are used as the keys of the new
+ documents (auto-generated otherwise). Any "_rev" field is ignored.
+ :type documents: [dict]
+ :param return_new: Include bodies of the new documents in the returned
+            metadata. Ignored if parameter **silent** is set to True.
+ :type return_new: bool
+ :param sync: Block until operation is synchronized to disk.
+ :type sync: bool
+ :param silent: If set to True, no document metadata is returned. This
+ can be used to save resources.
+ :type silent: bool
+        :return: List of document metadata (e.g. document keys, revisions) and
+            any exceptions, or True if parameter **silent** was set to True.
+ :rtype: [dict | ArangoError] | bool
+ :raise arango.exceptions.DocumentInsertError: If insert fails.
+ """
+ documents = [self._ensure_key_from_id(doc) for doc in documents]
+
+ params = {'returnNew': return_new, 'silent': silent}
+ if sync is not None:
+ params['waitForSync'] = sync
+
+ command = 'db.{}.insert({},{})'.format(
+ self.name,
+ dumps(documents),
+ dumps(params)
+ ) if self._is_transaction else None
+
+ request = Request(
+ method='post',
+ endpoint='/_api/document/{}'.format(self.name),
+ data=documents,
+ params=params,
+ command=command,
+ write=self.name
+ )
+
+ def response_handler(resp):
+ if not resp.is_success:
+ raise DocumentInsertError(resp, request)
+ if silent is True:
+ return True
+
+ results = []
+ for result in resp.body:
+ if '_id' in result:
+ results.append(result)
+ else:
+ sub_resp = Response(
+ method=resp.method,
+ url=resp.url,
+ headers=resp.headers,
+ status_code=resp.status_code,
+ status_text=resp.status_text,
+ raw_body=result
+ )
+ results.append(DocumentInsertError(sub_resp, request))
+
+ return results
+
+ return self._execute(request, response_handler)
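+
+    # Illustrative usage sketch: per-document failures come back as exception
+    # objects in the result list instead of being raised:
+    #
+    #   results = students.insert_many([{'_key': 'jane'}, {'_key': 'jane'}])
+    #   errors = [r for r in results if isinstance(r, DocumentInsertError)]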
+
+ def update(self,
+ document,
+ check_rev=True,
+ merge=True,
+ keep_none=True,
+ return_new=False,
+ return_old=False,
+ sync=None,
+ silent=False):
+ """Update a document.
+
+ :param document: Partial or full document with the updated values. It
+ must contain the "_id" or "_key" field.
+ :type document: dict
+ :param check_rev: If set to True, revision of **document** (if given)
+ is compared against the revision of target document.
+ :type check_rev: bool
+ :param merge: If set to True, sub-dictionaries are merged instead of
+ the new one overwriting the old one.
+ :type merge: bool
+ :param keep_none: If set to True, fields with value None are retained
+ in the document. Otherwise, they are removed completely.
+ :type keep_none: bool
+ :param return_new: Include body of the new document in the result.
+ :type return_new: bool
+ :param return_old: Include body of the old document in the result.
+ :type return_old: bool
+ :param sync: Block until operation is synchronized to disk.
+ :type sync: bool
+ :param silent: If set to True, no document metadata is returned. This
+ can be used to save resources.
+ :type silent: bool
+ :return: Document metadata (e.g. document key, revision) or True if
+ parameter **silent** was set to True.
+ :rtype: bool | dict
+ :raise arango.exceptions.DocumentUpdateError: If update fails.
+ :raise arango.exceptions.DocumentRevisionError: If revisions mismatch.
+ """
+ params = {
+ 'keepNull': keep_none,
+ 'mergeObjects': merge,
+ 'returnNew': return_new,
+ 'returnOld': return_old,
+ 'ignoreRevs': not check_rev,
+ 'overwrite': not check_rev,
+ 'silent': silent
+ }
+ if sync is not None:
+ params['waitForSync'] = sync
+
+ command = 'db.{col}.update({doc},{doc},{opts})'.format(
+ col=self.name,
+ doc=dumps(document),
+ opts=dumps(params)
+ ) if self._is_transaction else None
+
+ request = Request(
+ method='patch',
+ endpoint='/_api/document/{}'.format(
+ self._extract_id(document)
+ ),
+ data=document,
+ params=params,
+ command=command,
+ write=self.name
+ )
+
+ def response_handler(resp):
+ if resp.status_code == 412:
+ raise DocumentRevisionError(resp, request)
+ elif not resp.is_success:
+ raise DocumentUpdateError(resp, request)
+ if silent is True:
+ return True
+ resp.body['_old_rev'] = resp.body.pop('_oldRev')
+ return resp.body
+
+ return self._execute(request, response_handler)
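+
+    # Illustrative usage sketch: the body must carry "_id" or "_key", and
+    # check_rev=False skips the "_rev" comparison (assumed wrapper
+    # ``students``):
+    #
+    #   meta = students.update({'_key': 'john', 'age': 31}, check_rev=False)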
+
+ def update_many(self,
+ documents,
+ check_rev=True,
+ merge=True,
+ keep_none=True,
+ return_new=False,
+ return_old=False,
+ sync=None,
+ silent=False):
+ """Update multiple documents.
+
+ If updating a document fails, the exception object is placed in the
+ result list instead of document metadata.
+
+ :param documents: Partial or full documents with the updated values.
+ They must contain the "_id" or "_key" fields.
+ :type documents: [dict]
+ :param check_rev: If set to True, revisions of **documents** (if given)
+ are compared against the revisions of target documents.
+ :type check_rev: bool
+ :param merge: If set to True, sub-dictionaries are merged instead of
+ the new ones overwriting the old ones.
+ :type merge: bool
+ :param keep_none: If set to True, fields with value None are retained
+ in the document. Otherwise, they are removed completely.
+ :type keep_none: bool
+ :param return_new: Include bodies of the new documents in the result.
+ :type return_new: bool
+ :param return_old: Include bodies of the old documents in the result.
+ :type return_old: bool
+ :param sync: Block until operation is synchronized to disk.
+ :type sync: bool
+ :param silent: If set to True, no document metadata is returned. This
+ can be used to save resources.
+ :type silent: bool
+ :return: List of document metadata (e.g. document keys, revisions) and
+ any exceptions, or True if parameter **silent** was set to True.
+ :rtype: [dict | ArangoError] | bool
+ :raise arango.exceptions.DocumentUpdateError: If update fails.
+ """
+ params = {
+ 'keepNull': keep_none,
+ 'mergeObjects': merge,
+ 'returnNew': return_new,
+ 'returnOld': return_old,
+ 'ignoreRevs': not check_rev,
+ 'overwrite': not check_rev,
+ 'silent': silent
+ }
+ if sync is not None:
+ params['waitForSync'] = sync
+
+ documents = [self._ensure_key_in_body(doc) for doc in documents]
+ command = 'db.{col}.update({docs},{docs},{opts})'.format(
+ col=self.name,
+ docs=dumps(documents),
+ opts=dumps(params)
+ ) if self._is_transaction else None
+
+ request = Request(
+ method='patch',
+ endpoint='/_api/document/{}'.format(self.name),
+ data=documents,
+ params=params,
+ command=command,
+ write=self.name
+ )
+
+ def response_handler(resp):
+ if not resp.is_success:
+ raise DocumentUpdateError(resp, request)
+ if silent is True:
+ return True
+
+ results = []
+ for result in resp.body:
+ if '_id' not in result:
+ sub_resp = Response(
+ method='patch',
+ url=resp.url,
+ headers=resp.headers,
+ status_code=resp.status_code,
+ status_text=resp.status_text,
+ raw_body=result,
+ )
+ if result['errorNum'] == 1200:
+ result = DocumentRevisionError(sub_resp, request)
+ else:
+ result = DocumentUpdateError(sub_resp, request)
+ else:
+ result['_old_rev'] = result.pop('_oldRev')
+ results.append(result)
+
+ return results
+
+ return self._execute(request, response_handler)
+
+ def update_match(self,
+ filters,
+ body,
+ limit=None,
+ keep_none=True,
+ sync=None,
+ merge=True):
+ """Update matching documents.
+
+ :param filters: Document filters.
+ :type filters: dict
+ :param body: Full or partial document body with the updates.
+ :type body: dict
+ :param limit: Max number of documents to update. If the limit is lower
+ than the number of matched documents, random documents are
+ chosen. This parameter is not supported on sharded collections.
+ :type limit: int
+ :param keep_none: If set to True, fields with value None are retained
+ in the document. Otherwise, they are removed completely.
+ :type keep_none: bool
+ :param sync: Block until operation is synchronized to disk.
+ :type sync: bool
+ :param merge: If set to True, sub-dictionaries are merged instead of
+ the new ones overwriting the old ones.
+ :type merge: bool
+ :return: Number of documents updated.
+ :rtype: int
+ :raise arango.exceptions.DocumentUpdateError: If update fails.
+ """
+ data = {
+ 'collection': self.name,
+ 'example': filters,
+ 'newValue': body,
+ 'keepNull': keep_none,
+ 'mergeObjects': merge
+ }
+ if limit is not None:
+ data['limit'] = limit
+ if sync is not None:
+ data['waitForSync'] = sync
+
+ command = 'db.{}.updateByExample({},{},{})'.format(
+ self.name,
+ dumps(filters),
+ dumps(body),
+ dumps(data)
+ ) if self._is_transaction else None
+
+ request = Request(
+ method='put',
+ endpoint='/_api/simple/update-by-example',
+ data=data,
+ command=command,
+ write=self.name
+ )
+
+ def response_handler(resp):
+ if not resp.is_success:
+ raise DocumentUpdateError(resp, request)
+ if self._is_transaction:
+ return resp.body
+ return resp.body['updated']
+
+ return self._execute(request, response_handler)
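+
+    # Illustrative usage sketch: every document matching the filter receives
+    # the partial update, and the call returns the number updated:
+    #
+    #   count = students.update_match({'age': 30}, {'age': 31}, limit=10)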
+
+ def replace(self,
+ document,
+ check_rev=True,
+ return_new=False,
+ return_old=False,
+ sync=None,
+ silent=False):
+ """Replace a document.
+
+ :param document: New document to replace the old one with. It must
+ contain the "_id" or "_key" field. Edge document must also have
+ "_from" and "_to" fields.
+ :type document: dict
+ :param check_rev: If set to True, revision of **document** (if given)
+ is compared against the revision of target document.
+ :type check_rev: bool
+ :param return_new: Include body of the new document in the result.
+ :type return_new: bool
+ :param return_old: Include body of the old document in the result.
+ :type return_old: bool
+ :param sync: Block until operation is synchronized to disk.
+ :type sync: bool
+ :param silent: If set to True, no document metadata is returned. This
+ can be used to save resources.
+ :type silent: bool
+ :return: Document metadata (e.g. document key, revision) or True if
+ parameter **silent** was set to True.
+ :rtype: bool | dict
+ :raise arango.exceptions.DocumentReplaceError: If replace fails.
+ :raise arango.exceptions.DocumentRevisionError: If revisions mismatch.
+ """
+ params = {
+ 'returnNew': return_new,
+ 'returnOld': return_old,
+ 'ignoreRevs': not check_rev,
+ 'overwrite': not check_rev,
+ 'silent': silent
+ }
+ if sync is not None:
+ params['waitForSync'] = sync
+
+ command = 'db.{col}.replace({doc},{doc},{opts})'.format(
+ col=self.name,
+ doc=dumps(document),
+ opts=dumps(params)
+ ) if self._is_transaction else None
+
+ request = Request(
+ method='put',
+ endpoint='/_api/document/{}'.format(
+ self._extract_id(document)
+ ),
+ params=params,
+ data=document,
+ command=command,
+ write=self.name
+ )
+
+ def response_handler(resp):
+ if resp.status_code == 412:
+ raise DocumentRevisionError(resp, request)
+ if not resp.is_success:
+ raise DocumentReplaceError(resp, request)
+ if silent is True:
+ return True
+ resp.body['_old_rev'] = resp.body.pop('_oldRev')
+ return resp.body
+
+ return self._execute(request, response_handler)
+
+ def replace_many(self,
+ documents,
+ check_rev=True,
+ return_new=False,
+ return_old=False,
+ sync=None,
+ silent=False):
+ """Replace multiple documents.
+
+ If replacing a document fails, the exception object is placed in the
+ result list instead of document metadata.
+
+ :param documents: New documents to replace the old ones with. They must
+ contain the "_id" or "_key" fields. Edge documents must also have
+ "_from" and "_to" fields.
+ :type documents: [dict]
+ :param check_rev: If set to True, revisions of **documents** (if given)
+ are compared against the revisions of target documents.
+ :type check_rev: bool
+ :param return_new: Include bodies of the new documents in the result.
+ :type return_new: bool
+ :param return_old: Include bodies of the old documents in the result.
+ :type return_old: bool
+ :param sync: Block until operation is synchronized to disk.
+ :type sync: bool
+ :param silent: If set to True, no document metadata is returned. This
+ can be used to save resources.
+ :type silent: bool
+ :return: List of document metadata (e.g. document keys, revisions) and
+ any exceptions, or True if parameter **silent** was set to True.
+ :rtype: [dict | ArangoError] | bool
+ :raise arango.exceptions.DocumentReplaceError: If replace fails.
+ """
+ params = {
+ 'returnNew': return_new,
+ 'returnOld': return_old,
+ 'ignoreRevs': not check_rev,
+ 'overwrite': not check_rev,
+ 'silent': silent
+ }
+ if sync is not None:
+ params['waitForSync'] = sync
+
+ documents = [self._ensure_key_in_body(doc) for doc in documents]
+ command = 'db.{col}.replace({docs},{docs},{opts})'.format(
+ col=self.name,
+ docs=dumps(documents),
+ opts=dumps(params)
+ ) if self._is_transaction else None
+
+ request = Request(
+ method='put',
+ endpoint='/_api/document/{}'.format(self.name),
+ params=params,
+ data=documents,
+ command=command,
+ write=self.name
+ )
+
+ def response_handler(resp):
+ if not resp.is_success:
+ raise DocumentReplaceError(resp, request)
+ if silent is True:
+ return True
+
+ results = []
+ for result in resp.body:
+ if '_id' not in result:
+ sub_resp = Response(
+ method=resp.method,
+ url=resp.url,
+ headers=resp.headers,
+ status_code=resp.status_code,
+ status_text=resp.status_text,
+ raw_body=result
+ )
+ if result['errorNum'] == 1200:
+ result = DocumentRevisionError(sub_resp, request)
+ else:
+ result = DocumentReplaceError(sub_resp, request)
+ else:
+ result['_old_rev'] = result.pop('_oldRev')
+ results.append(result)
+
+ return results
+
+ return self._execute(request, response_handler)
+
+ def replace_match(self, filters, body, limit=None, sync=None):
+ """Replace matching documents.
+
+ :param filters: Document filters.
+ :type filters: dict
+ :param body: New document body.
+ :type body: dict
+ :param limit: Max number of documents to replace. If the limit is lower
+ than the number of matched documents, random documents are chosen.
+ :type limit: int
+ :param sync: Block until operation is synchronized to disk.
+ :type sync: bool
+ :return: Number of documents replaced.
+ :rtype: int
+ :raise arango.exceptions.DocumentReplaceError: If replace fails.
+ """
+ data = {
+ 'collection': self.name,
+ 'example': filters,
+ 'newValue': body
+ }
+ if limit is not None:
+ data['limit'] = limit
+ if sync is not None:
+ data['waitForSync'] = sync
+
+ command = 'db.{}.replaceByExample({},{},{})'.format(
+ self.name,
+ dumps(filters),
+ dumps(body),
+ dumps(data)
+ ) if self._is_transaction else None
+
+ request = Request(
+ method='put',
+ endpoint='/_api/simple/replace-by-example',
+ data=data,
+ command=command,
+ write=self.name
+ )
+
+ def response_handler(resp):
+ if not resp.is_success:
+ raise DocumentReplaceError(resp, request)
+ if self._is_transaction:
+ return resp.body
+ return resp.body['replaced']
+
+ return self._execute(request, response_handler)
+
+ def delete(self,
+ document,
+ rev=None,
+ check_rev=True,
+ ignore_missing=False,
+ return_old=False,
+ sync=None,
+ silent=False):
+ """Delete a document.
+
+ :param document: Document ID, key or body. Document body must contain
+ the "_id" or "_key" field.
+ :type document: str | unicode | dict
+ :param rev: Expected document revision. Overrides the value of "_rev"
+ field in **document** if present.
+ :type rev: str | unicode
+ :param check_rev: If set to True, revision of **document** (if given)
+ is compared against the revision of target document.
+ :type check_rev: bool
+ :param ignore_missing: Do not raise an exception on missing document.
+ This parameter has no effect in transactions where an exception is
+ always raised on failures.
+ :type ignore_missing: bool
+ :param return_old: Include body of the old document in the result.
+ :type return_old: bool
+ :param sync: Block until operation is synchronized to disk.
+ :type sync: bool
+ :param silent: If set to True, no document metadata is returned. This
+ can be used to save resources.
+ :type silent: bool
+ :return: Document metadata (e.g. document key, revision), or True if
+ parameter **silent** was set to True, or False if document was not
+ found and **ignore_missing** was set to True (does not apply in
+ transactions).
+ :rtype: bool | dict
+ :raise arango.exceptions.DocumentDeleteError: If delete fails.
+ :raise arango.exceptions.DocumentRevisionError: If revisions mismatch.
+ """
+ handle, body, headers = self._prep_from_doc(document, rev, check_rev)
+
+ params = {
+ 'returnOld': return_old,
+ 'ignoreRevs': not check_rev,
+ 'overwrite': not check_rev,
+ 'silent': silent
+ }
+ if sync is not None:
+ params['waitForSync'] = sync
+
+ command = 'db.{}.remove({},{})'.format(
+ self.name,
+ dumps(body),
+ dumps(params)
+ ) if self._is_transaction else None
+
+ request = Request(
+ method='delete',
+ endpoint='/_api/document/{}'.format(handle),
+ params=params,
+ headers=headers,
+ command=command,
+ write=self.name
+ )
+
+ def response_handler(resp):
+ if resp.error_code == 1202 and ignore_missing:
+ return False
+ if resp.status_code == 412:
+ raise DocumentRevisionError(resp, request)
+ if not resp.is_success:
+ raise DocumentDeleteError(resp, request)
+ return True if silent else resp.body
+
+ return self._execute(request, response_handler)
+
+ def delete_many(self,
+ documents,
+ return_old=False,
+ check_rev=True,
+ sync=None,
+ silent=False):
+ """Delete multiple documents.
+
+ If deleting a document fails, the exception object is placed in the
+ result list instead of document metadata.
+
+ :param documents: Document IDs, keys or bodies. Document bodies must
+ contain the "_id" or "_key" fields.
+ :type documents: [str | unicode | dict]
+ :param return_old: Include bodies of the old documents in the result.
+ :type return_old: bool
+ :param check_rev: If set to True, revisions of **documents** (if given)
+ are compared against the revisions of target documents.
+ :type check_rev: bool
+ :param sync: Block until operation is synchronized to disk.
+ :type sync: bool
+ :param silent: If set to True, no document metadata is returned. This
+ can be used to save resources.
+ :type silent: bool
+ :return: List of document metadata (e.g. document keys, revisions) and
+ any exceptions, or True if parameter **silent** was set to True.
+ :rtype: [dict | ArangoError] | bool
+ :raise arango.exceptions.DocumentDeleteError: If delete fails.
+ """
+ params = {
+ 'returnOld': return_old,
+ 'ignoreRevs': not check_rev,
+ 'overwrite': not check_rev,
+ 'silent': silent
+ }
+ if sync is not None:
+ params['waitForSync'] = sync
+
+ documents = [
+ self._ensure_key_in_body(doc) if isinstance(doc, dict) else doc
+ for doc in documents
+ ]
+ command = 'db.{}.remove({},{})'.format(
+ self.name,
+ dumps(documents),
+ dumps(params)
+ ) if self._is_transaction else None
+
+ request = Request(
+ method='delete',
+ endpoint='/_api/document/{}'.format(self.name),
+ params=params,
+ data=documents,
+ command=command,
+ write=self.name
+ )
+
+ def response_handler(resp):
+ if not resp.is_success:
+ raise DocumentDeleteError(resp, request)
+ if silent is True:
+ return True
+
+ results = []
+ for result in resp.body:
+ if '_id' not in result:
+ sub_resp = Response(
+ method=resp.method,
+ url=resp.url,
+ headers=resp.headers,
+ status_code=resp.status_code,
+ status_text=resp.status_text,
+ raw_body=result
+ )
+ if result['errorNum'] == 1200:
+ result = DocumentRevisionError(sub_resp, request)
+ else:
+ result = DocumentDeleteError(sub_resp, request)
+ results.append(result)
+
+ return results
+
+ return self._execute(request, response_handler)
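+
+    # Illustrative usage sketch: as with insert_many, failures (e.g. missing
+    # documents) are returned as exception objects in the result list:
+    #
+    #   results = students.delete_many([{'_key': 'john'}, {'_key': 'jane'}])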
+
+ def delete_match(self, filters, limit=None, sync=None):
+ """Delete matching documents.
+
+ :param filters: Document filters.
+ :type filters: dict
+ :param limit: Max number of documents to delete. If the limit is lower
+ than the number of matched documents, random documents are chosen.
+ :type limit: int
+ :param sync: Block until operation is synchronized to disk.
+ :type sync: bool
+ :return: Number of documents deleted.
+        :rtype: int
+ :raise arango.exceptions.DocumentDeleteError: If delete fails.
+ """
+ data = {'collection': self.name, 'example': filters}
+ if sync is not None:
+ data['waitForSync'] = sync
+ if limit is not None and limit != 0:
+ data['limit'] = limit
+
+ command = 'db.{}.removeByExample({},{})'.format(
+ self.name,
+ dumps(filters),
+ dumps(data)
+ ) if self._is_transaction else None
+
+ request = Request(
+ method='put',
+ endpoint='/_api/simple/remove-by-example',
+ data=data,
+ command=command,
+ write=self.name
+ )
+
+ def response_handler(resp):
+ if not resp.is_success:
+ raise DocumentDeleteError(resp, request)
+ if self._is_transaction:
+ return resp.body
+ return resp.body['deleted']
+
+ return self._execute(request, response_handler)
+
+ def import_bulk(self,
+ documents,
+ halt_on_error=True,
+ details=True,
+ from_prefix=None,
+ to_prefix=None,
+ overwrite=None,
+ on_duplicate=None,
+ sync=None):
+ """Insert multiple documents into the collection.
+
+ This is faster than :func:`arango.collection.Collection.insert_many`
+ but does not return as much information.
+
+ :param documents: List of new documents to insert. If they contain the
+ "_key" or "_id" fields, the values are used as the keys of the new
+ documents (auto-generated otherwise). Any "_rev" field is ignored.
+ :type documents: [dict]
+ :param halt_on_error: Halt the entire import on an error.
+ :type halt_on_error: bool
+ :param details: If set to True, the returned result will include an
+ additional list of detailed error messages.
+ :type details: bool
+ :param from_prefix: String prefix prepended to the value of "_from"
+ field in each edge document inserted. For example, prefix "foo"
+ prepended to "_from": "bar" will result in "_from": "foo/bar".
+ Applies only to edge collections.
+ :type from_prefix: str | unicode
+ :param to_prefix: String prefix prepended to the value of "_to" field
+            in each edge document inserted. For example, prefix "foo"
+            prepended to "_to": "bar" will result in "_to": "foo/bar".
+            Applies only to edge collections.
+ :type to_prefix: str | unicode
+ :param overwrite: If set to True, all existing documents are removed
+ prior to the import. Indexes are still preserved.
+ :type overwrite: bool
+ :param on_duplicate: Action to take on unique key constraint violations
+ (for documents with "_key" fields). Allowed values are "error" (do
+ not import the new documents and count them as errors), "update"
+ (update the existing documents while preserving any fields missing
+ in the new ones), "replace" (replace the existing documents with
+ new ones), and "ignore" (do not import the new documents and count
+ them as ignored, as opposed to counting them as errors). Options
+ "update" and "replace" may fail on secondary unique key constraint
+ violations.
+ :type on_duplicate: str | unicode
+ :param sync: Block until operation is synchronized to disk.
+ :type sync: bool
+ :return: Result of the bulk import.
+ :rtype: dict
+ :raise arango.exceptions.DocumentInsertError: If import fails.
+ """
+ documents = [self._ensure_key_from_id(doc) for doc in documents]
+
+        params = {'type': 'array', 'collection': self.name}
+        if halt_on_error is not None:
+            params['complete'] = halt_on_error
+        if details is not None:
+            params['details'] = details
+ if from_prefix is not None:
+ params['fromPrefix'] = from_prefix
+ if to_prefix is not None:
+ params['toPrefix'] = to_prefix
+ if overwrite is not None:
+ params['overwrite'] = overwrite
+ if on_duplicate is not None:
+ params['onDuplicate'] = on_duplicate
+ if sync is not None:
+ params['waitForSync'] = sync
+
+ request = Request(
+ method='post',
+ endpoint='/_api/import',
+ data=documents,
+ params=params
+ )
+
+ def response_handler(resp):
+ if not resp.is_success:
+ raise DocumentInsertError(resp, request)
+ return resp.body
+
+ return self._execute(request, response_handler)
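+
+    # Illustrative usage sketch: a fast bulk load where documents with
+    # existing keys are updated instead of counted as errors:
+    #
+    #   result = students.import_bulk(docs, on_duplicate='update')
+    #   created, updated = result['created'], result['updated']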
+
+
+class VertexCollection(Collection):
+ """Vertex collection API wrapper.
+
+ :param connection: HTTP connection.
+ :type connection: arango.connection.Connection
+ :param executor: API executor.
+ :type executor: arango.executor.Executor
+ :param graph: Graph name.
+ :type graph: str | unicode
+ :param name: Vertex collection name.
+ :type name: str | unicode
+ """
+
+ def __init__(self, connection, executor, graph, name):
+ super(VertexCollection, self).__init__(connection, executor, name)
+ self._graph = graph
+
+ def __repr__(self):
+        return '<VertexCollection {}>'.format(self.name)
+
+ def __getitem__(self, key):
+ return self.get(key)
+
+ @property
+ def graph(self):
+ """Return the graph name.
+
+ :return: Graph name.
+ :rtype: str | unicode
+ """
+ return self._graph
+
+ def get(self, vertex, rev=None, check_rev=True):
+ """Return a vertex document.
+
+ :param vertex: Vertex document ID, key or body. Document body must
+ contain the "_id" or "_key" field.
+ :type vertex: str | unicode | dict
+ :param rev: Expected document revision. Overrides the value of "_rev"
+ field in **vertex** if present.
+ :type rev: str | unicode
+ :param check_rev: If set to True, revision of **vertex** (if given) is
+ compared against the revision of target vertex document.
+ :type check_rev: bool
+ :return: Vertex document or None if not found.
+ :rtype: dict | None
+ :raise arango.exceptions.DocumentGetError: If retrieval fails.
+ :raise arango.exceptions.DocumentRevisionError: If revisions mismatch.
+ """
+ handle, body, headers = self._prep_from_doc(vertex, rev, check_rev)
+
+ command = 'gm._graph("{}").{}.document({})'.format(
+ self.graph,
+ self.name,
+ dumps(body)
+ ) if self._is_transaction else None
+
+ request = Request(
+ method='get',
+ endpoint='/_api/gharial/{}/vertex/{}'.format(
+ self._graph, handle
+ ),
+ headers=headers,
+ command=command,
+ read=self.name
+ )
+
+ def response_handler(resp):
+ if resp.error_code == 1202:
+ return None
+ if resp.status_code == 412:
+ raise DocumentRevisionError(resp, request)
+ if not resp.is_success:
+ raise DocumentGetError(resp, request)
+ if self._is_transaction:
+ return resp.body
+ return resp.body['vertex']
+
+ return self._execute(request, response_handler)
+
+ def insert(self, vertex, sync=None, silent=False):
+ """Insert a new vertex document.
+
+ :param vertex: New vertex document to insert. If it has "_key" or "_id"
+ field, its value is used as key of the new vertex (otherwise it is
+ auto-generated). Any "_rev" field is ignored.
+ :type vertex: dict
+ :param sync: Block until operation is synchronized to disk.
+ :type sync: bool
+ :param silent: If set to True, no document metadata is returned. This
+ can be used to save resources.
+ :type silent: bool
+ :return: Document metadata (e.g. document key, revision) or True if
+ parameter **silent** was set to True.
+ :rtype: bool | dict
+ :raise arango.exceptions.DocumentInsertError: If insert fails.
+ """
+ vertex = self._ensure_key_from_id(vertex)
+
+ params = {'silent': silent}
+ if sync is not None:
+ params['waitForSync'] = sync
+
+ command = 'gm._graph("{}").{}.save({},{})'.format(
+ self.graph,
+ self.name,
+ dumps(vertex),
+ dumps(params)
+ ) if self._is_transaction else None
+
+ request = Request(
+ method='post',
+ endpoint='/_api/gharial/{}/vertex/{}'.format(
+ self._graph, self.name
+ ),
+ data=vertex,
+ params=params,
+ command=command,
+ write=self.name
+ )
+
+ def response_handler(resp):
+ if not resp.is_success:
+ raise DocumentInsertError(resp, request)
+ if silent is True:
+ return True
+ if self._is_transaction:
+ return resp.body
+ return resp.body['vertex']
+
+ return self._execute(request, response_handler)
+
+ def update(self,
+ vertex,
+ check_rev=True,
+ keep_none=True,
+ sync=None,
+ silent=False):
+ """Update a vertex document.
+
+ :param vertex: Partial or full vertex document with updated values. It
+ must contain the "_key" or "_id" field.
+ :type vertex: dict
+ :param check_rev: If set to True, revision of **vertex** (if given) is
+ compared against the revision of target vertex document.
+ :type check_rev: bool
+ :param keep_none: If set to True, fields with value None are retained
+ in the document. If set to False, they are removed completely.
+ :type keep_none: bool
+ :param sync: Block until operation is synchronized to disk.
+ :type sync: bool
+ :param silent: If set to True, no document metadata is returned. This
+ can be used to save resources.
+ :type silent: bool
+ :return: Document metadata (e.g. document key, revision) or True if
+ parameter **silent** was set to True.
+ :rtype: bool | dict
+ :raise arango.exceptions.DocumentUpdateError: If update fails.
+ :raise arango.exceptions.DocumentRevisionError: If revisions mismatch.
+ """
+ vertex_id, headers = self._prep_from_body(vertex, check_rev)
+
+ params = {
+ 'keepNull': keep_none,
+ 'overwrite': not check_rev,
+ 'silent': silent
+ }
+ if sync is not None:
+ params['waitForSync'] = sync
+
+ command = 'gm._graph("{}").{}.update("{}",{},{})'.format(
+ self.graph,
+ self.name,
+ vertex_id,
+ dumps(vertex),
+ dumps(params)
+ ) if self._is_transaction else None
+
+ request = Request(
+ method='patch',
+ endpoint='/_api/gharial/{}/vertex/{}'.format(
+ self._graph, vertex_id
+ ),
+ headers=headers,
+ params=params,
+ data=vertex,
+ command=command,
+ write=self.name
+ )
+
+ def response_handler(resp):
+ if resp.status_code == 412:
+ raise DocumentRevisionError(resp, request)
+ elif not resp.is_success:
+ raise DocumentUpdateError(resp, request)
+ if silent is True:
+ return True
+ if self._is_transaction:
+ result = resp.body
+ else:
+ result = resp.body['vertex']
+ result['_old_rev'] = result.pop('_oldRev')
+ return result
+
+ return self._execute(request, response_handler)
+
+ def replace(self, vertex, check_rev=True, sync=None, silent=False):
+ """Replace a vertex document.
+
+ :param vertex: New vertex document to replace the old one with. It must
+ contain the "_key" or "_id" field.
+ :type vertex: dict
+ :param check_rev: If set to True, revision of **vertex** (if given) is
+ compared against the revision of target vertex document.
+ :type check_rev: bool
+ :param sync: Block until operation is synchronized to disk.
+ :type sync: bool
+ :param silent: If set to True, no document metadata is returned. This
+ can be used to save resources.
+ :type silent: bool
+ :return: Document metadata (e.g. document key, revision) or True if
+ parameter **silent** was set to True.
+ :rtype: bool | dict
+ :raise arango.exceptions.DocumentReplaceError: If replace fails.
+ :raise arango.exceptions.DocumentRevisionError: If revisions mismatch.
+ """
+ vertex_id, headers = self._prep_from_body(vertex, check_rev)
+
+ params = {'silent': silent}
+ if sync is not None:
+ params['waitForSync'] = sync
+
+ command = 'gm._graph("{}").{}.replace("{}",{},{})'.format(
+ self.graph,
+ self.name,
+ vertex_id,
+ dumps(vertex),
+ dumps(params)
+ ) if self._is_transaction else None
+
+ request = Request(
+ method='put',
+ endpoint='/_api/gharial/{}/vertex/{}'.format(
+ self._graph, vertex_id
+ ),
+ headers=headers,
+ params=params,
+ data=vertex,
+ command=command,
+ write=self.name
+ )
+
+ def response_handler(resp):
+ if resp.status_code == 412:
+ raise DocumentRevisionError(resp, request)
+ elif not resp.is_success:
+ raise DocumentReplaceError(resp, request)
+ if silent is True:
+ return True
+ if self._is_transaction:
+ result = resp.body
+ else:
+ result = resp.body['vertex']
+ result['_old_rev'] = result.pop('_oldRev')
+ return result
+
+ return self._execute(request, response_handler)
+
+ def delete(self,
+ vertex,
+ rev=None,
+ check_rev=True,
+ ignore_missing=False,
+ sync=None):
+ """Delete a vertex document.
+
+ :param vertex: Vertex document ID, key or body. Document body must
+ contain the "_id" or "_key" field.
+ :type vertex: str | unicode | dict
+ :param rev: Expected document revision. Overrides the value of "_rev"
+ field in **vertex** if present.
+ :type rev: str | unicode
+ :param check_rev: If set to True, revision of **vertex** (if given) is
+ compared against the revision of target vertex document.
+ :type check_rev: bool
+ :param ignore_missing: Do not raise an exception on missing document.
+ This parameter has no effect in transactions where an exception is
+ always raised on failures.
+ :type ignore_missing: bool
+ :param sync: Block until operation is synchronized to disk.
+ :type sync: bool
+ :return: True if vertex was deleted successfully, False if vertex was
+ not found and **ignore_missing** was set to True (does not apply in
+ transactions).
+ :rtype: bool
+ :raise arango.exceptions.DocumentDeleteError: If delete fails.
+ :raise arango.exceptions.DocumentRevisionError: If revisions mismatch.
+ """
+ handle, _, headers = self._prep_from_doc(vertex, rev, check_rev)
+
+ params = {} if sync is None else {'waitForSync': sync}
+ command = 'gm._graph("{}").{}.remove("{}",{})'.format(
+ self.graph,
+ self.name,
+ handle,
+ dumps(params)
+ ) if self._is_transaction else None
+
+ request = Request(
+ method='delete',
+ endpoint='/_api/gharial/{}/vertex/{}'.format(
+ self._graph, handle
+ ),
+ params=params,
+ headers=headers,
+ command=command,
+ write=self.name
+ )
+
+ def response_handler(resp):
+ if resp.error_code == 1202 and ignore_missing:
+ return False
+ if resp.status_code == 412:
+ raise DocumentRevisionError(resp, request)
+ if not resp.is_success:
+ raise DocumentDeleteError(resp, request)
+ return True
+
+ return self._execute(request, response_handler)
+
+
+class EdgeCollection(Collection):
+ """ArangoDB edge collection API wrapper.
+
+ :param connection: HTTP connection.
+ :type connection: arango.connection.Connection
+ :param executor: API executor.
+ :type executor: arango.executor.Executor
+ :param graph: Graph name.
+ :type graph: str | unicode
+ :param name: Edge collection name.
+ :type name: str | unicode
+ """
+
+ def __init__(self, connection, executor, graph, name):
+ super(EdgeCollection, self).__init__(connection, executor, name)
+ self._graph = graph
+
+ def __repr__(self):
+        return '<EdgeCollection {}>'.format(self.name)
+
+ def __getitem__(self, key):
+ return self.get(key)
+
+ @property
+ def graph(self):
+ """Return the graph name.
+
+ :return: Graph name.
+ :rtype: str | unicode
+ """
+ return self._graph
+
+ def get(self, edge, rev=None, check_rev=True):
+ """Return an edge document.
+
+ :param edge: Edge document ID, key or body. Document body must contain
+ the "_id" or "_key" field.
+ :type edge: str | unicode | dict
+ :param rev: Expected document revision. Overrides the value of "_rev"
+ field in **edge** if present.
+ :type rev: str | unicode
+ :param check_rev: If set to True, revision of **edge** (if given) is
+ compared against the revision of target edge document.
+ :type check_rev: bool
+ :return: Edge document or None if not found.
+ :rtype: dict | None
+ :raise arango.exceptions.DocumentGetError: If retrieval fails.
+ :raise arango.exceptions.DocumentRevisionError: If revisions mismatch.
+ """
+ handle, body, headers = self._prep_from_doc(edge, rev, check_rev)
+
+ command = 'gm._graph("{}").{}.document({})'.format(
+ self.graph,
+ self.name,
+ dumps(body)
+ ) if self._is_transaction else None
+
+ request = Request(
+ method='get',
+ endpoint='/_api/gharial/{}/edge/{}'.format(
+ self._graph, handle
+ ),
+ headers=headers,
+ command=command,
+ read=self.name
+ )
+
+ def response_handler(resp):
+ if resp.error_code == 1202:
+ return None
+ if resp.status_code == 412:
+ raise DocumentRevisionError(resp, request)
+ if not resp.is_success:
+ raise DocumentGetError(resp, request)
+ if self._is_transaction:
+ return resp.body
+ return resp.body['edge']
+
+ return self._execute(request, response_handler)
+
+ def insert(self, edge, sync=None, silent=False):
+ """Insert a new edge document.
+
+ :param edge: New edge document to insert. It must contain "_from" and
+ "_to" fields. If it has "_key" or "_id" field, its value is used
+ as key of the new edge document (otherwise it is auto-generated).
+ Any "_rev" field is ignored.
+ :type edge: dict
+ :param sync: Block until operation is synchronized to disk.
+ :type sync: bool
+ :param silent: If set to True, no document metadata is returned. This
+ can be used to save resources.
+ :type silent: bool
+ :return: Document metadata (e.g. document key, revision) or True if
+ parameter **silent** was set to True.
+ :rtype: bool | dict
+ :raise arango.exceptions.DocumentInsertError: If insert fails.
+ """
+ edge = self._ensure_key_from_id(edge)
+
+ params = {'silent': silent}
+ if sync is not None:
+ params['waitForSync'] = sync
+
+ command = 'gm._graph("{}").{}.save("{}","{}",{},{})'.format(
+ self.graph,
+ self.name,
+ edge['_from'],
+ edge['_to'],
+ dumps(edge),
+ dumps(params)
+ ) if self._is_transaction else None
+
+ request = Request(
+ method='post',
+ endpoint='/_api/gharial/{}/edge/{}'.format(
+ self._graph, self.name
+ ),
+ data=edge,
+ params=params,
+ command=command,
+ write=self.name
+ )
+
+ def response_handler(resp):
+ if not resp.is_success:
+ raise DocumentInsertError(resp, request)
+ if silent is True:
+ return True
+ if self._is_transaction:
+ return resp.body
+ return resp.body['edge']
+
+ return self._execute(request, response_handler)
+
+ def update(self,
+ edge,
+ check_rev=True,
+ keep_none=True,
+ sync=None,
+ silent=False):
+ """Update an edge document.
+
+ :param edge: Partial or full edge document with updated values. It must
+ contain the "_key" or "_id" field.
+ :type edge: dict
+ :param check_rev: If set to True, revision of **edge** (if given) is
+ compared against the revision of target edge document.
+ :type check_rev: bool
+ :param keep_none: If set to True, fields with value None are retained
+ in the document. If set to False, they are removed completely.
+ :type keep_none: bool
+ :param sync: Block until operation is synchronized to disk.
+ :type sync: bool
+ :param silent: If set to True, no document metadata is returned. This
+ can be used to save resources.
+ :type silent: bool
+ :return: Document metadata (e.g. document key, revision) or True if
+ parameter **silent** was set to True.
+ :rtype: bool | dict
+ :raise arango.exceptions.DocumentUpdateError: If update fails.
+ :raise arango.exceptions.DocumentRevisionError: If revisions mismatch.
+ """
+ edge_id, headers = self._prep_from_body(edge, check_rev)
+
+ params = {
+ 'keepNull': keep_none,
+ 'overwrite': not check_rev,
+ 'silent': silent
+ }
+ if sync is not None:
+ params['waitForSync'] = sync
+
+ command = 'gm._graph("{}").{}.update("{}",{},{})'.format(
+ self.graph,
+ self.name,
+ edge_id,
+ dumps(edge),
+ dumps(params)
+ ) if self._is_transaction else None
+
+ request = Request(
+ method='patch',
+ endpoint='/_api/gharial/{}/edge/{}'.format(
+ self._graph, edge_id
+ ),
+ headers=headers,
+ params=params,
+ data=edge,
+ command=command,
+ write=self.name
+ )
+
+ def response_handler(resp):
+ if resp.status_code == 412:
+ raise DocumentRevisionError(resp, request)
+ if not resp.is_success:
+ raise DocumentUpdateError(resp, request)
+ if silent is True:
+ return True
+ if self._is_transaction:
+ result = resp.body
+ else:
+ result = resp.body['edge']
+ result['_old_rev'] = result.pop('_oldRev')
+ return result
+
+ return self._execute(request, response_handler)
+
+ def replace(self, edge, check_rev=True, sync=None, silent=False):
+ """Replace an edge document.
+
+ :param edge: New edge document to replace the old one with. It must
+ contain the "_key" or "_id" field. It must also contain the "_from"
+ and "_to" fields.
+ :type edge: dict
+ :param check_rev: If set to True, revision of **edge** (if given) is
+ compared against the revision of target edge document.
+ :type check_rev: bool
+ :param sync: Block until operation is synchronized to disk.
+ :type sync: bool
+ :param silent: If set to True, no document metadata is returned. This
+ can be used to save resources.
+ :type silent: bool
+ :return: Document metadata (e.g. document key, revision) or True if
+ parameter **silent** was set to True.
+ :rtype: bool | dict
+ :raise arango.exceptions.DocumentReplaceError: If replace fails.
+ :raise arango.exceptions.DocumentRevisionError: If revisions mismatch.
+ """
+ edge_id, headers = self._prep_from_body(edge, check_rev)
+
+ params = {'silent': silent}
+ if sync is not None:
+ params['waitForSync'] = sync
+
+ command = 'gm._graph("{}").{}.replace("{}",{},{})'.format(
+ self.graph,
+ self.name,
+ edge_id,
+ dumps(edge),
+ dumps(params)
+ ) if self._is_transaction else None
+
+ request = Request(
+ method='put',
+ endpoint='/_api/gharial/{}/edge/{}'.format(
+ self._graph, edge_id
+ ),
+ headers=headers,
+ params=params,
+ data=edge,
+ command=command,
+ write=self.name
+ )
+
+ def response_handler(resp):
+ if resp.status_code == 412:
+ raise DocumentRevisionError(resp, request)
+ if not resp.is_success:
+ raise DocumentReplaceError(resp, request)
+ if silent is True:
+ return True
+ if self._is_transaction:
+ result = resp.body
+ else:
+ result = resp.body['edge']
+ result['_old_rev'] = result.pop('_oldRev')
+ return result
+
+ return self._execute(request, response_handler)
+
+ def delete(self,
+ edge,
+ rev=None,
+ check_rev=True,
+ ignore_missing=False,
+ sync=None):
+ """Delete an edge document.
+
+ :param edge: Edge document ID, key or body. Document body must contain
+ the "_id" or "_key" field.
+ :type edge: str | unicode | dict
+ :param rev: Expected document revision. Overrides the value of "_rev"
+ field in **edge** if present.
+ :type rev: str | unicode
+ :param check_rev: If set to True, the revision of **edge** (if given) is
+ compared against the revision of the target edge document.
+ :type check_rev: bool
+ :param ignore_missing: Do not raise an exception on missing document.
+ This parameter has no effect in transactions, where an exception is
+ always raised on failure.
+ :type ignore_missing: bool
+ :param sync: Block until operation is synchronized to disk.
+ :type sync: bool
+ :return: True if edge was deleted successfully, False if edge was not
+ found and **ignore_missing** was set to True (does not apply in
+ transactions).
+ :rtype: bool
+ :raise arango.exceptions.DocumentDeleteError: If delete fails.
+ :raise arango.exceptions.DocumentRevisionError: If revisions mismatch.
+ """
+ handle, _, headers = self._prep_from_doc(edge, rev, check_rev)
+
+ params = {} if sync is None else {'waitForSync': sync}
+ command = 'gm._graph("{}").{}.remove("{}",{})'.format(
+ self.graph,
+ self.name,
+ handle,
+ dumps(params)
+ ) if self._is_transaction else None
+
+ request = Request(
+ method='delete',
+ endpoint='/_api/gharial/{}/edge/{}'.format(
+ self._graph, handle
+ ),
+ params=params,
+ headers=headers,
+ command=command,
+ write=self.name
+ )
+
+ def response_handler(resp):
+ if resp.error_code == 1202 and ignore_missing:
+ return False
+ if resp.status_code == 412:
+ raise DocumentRevisionError(resp, request)
+ if not resp.is_success:
+ raise DocumentDeleteError(resp, request)
+ return True
+
+ return self._execute(request, response_handler)
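+
+ # Usage sketch for delete() (hypothetical names; with ignore_missing set
+ # to True, a missing edge yields False instead of DocumentDeleteError):
+ #
+ #   teach.delete('teaches/5', ignore_missing=True)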
+
+ def link(self, from_vertex, to_vertex, data=None, sync=None, silent=False):
+ """Insert a new edge document linking the given vertices.
+
+ :param from_vertex: "From" vertex document ID or body with "_id" field.
+ :type from_vertex: str | unicode | dict
+ :param to_vertex: "To" vertex document ID or body with "_id" field.
+ :type to_vertex: str | unicode | dict
+ :param data: Any extra data for the new edge document. If it has "_key"
+ or "_id" field, its value is used as key of the new edge document
+ (otherwise it is auto-generated).
+ :type data: dict
+ :param sync: Block until operation is synchronized to disk.
+ :type sync: bool
+ :param silent: If set to True, no document metadata is returned. This
+ can be used to save resources.
+ :type silent: bool
+ :return: Document metadata (e.g. document key, revision) or True if
+ parameter **silent** was set to True.
+ :rtype: bool | dict
+ :raise arango.exceptions.DocumentInsertError: If insert fails.
+ """
+ edge = {
+ '_from': get_id(from_vertex),
+ '_to': get_id(to_vertex)
+ }
+ if data is not None:
+ edge.update(self._ensure_key_from_id(data))
+ return self.insert(edge, sync=sync, silent=silent)
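+
+ # Usage sketch for link() (hypothetical names; "_from" and "_to" are
+ # derived from the given vertices, extra fields come from **data**):
+ #
+ #   teach.link('teachers/1', 'lectures/2', data={'online': False})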
+
+ def edges(self, vertex, direction=None):
+ """Return the edge documents coming in and/or out of the vertex.
+
+ :param vertex: Vertex document ID or body with "_id" field.
+ :type vertex: str | unicode | dict
+ :param direction: The direction of the edges. Allowed values are "in"
+ and "out". If not set, edges in both directions are returned.
+ :type direction: str | unicode
+ :return: List of edges and statistics.
+ :rtype: dict
+ :raise arango.exceptions.EdgeListError: If retrieval fails.
+ """
+ params = {'vertex': get_id(vertex)}
+ if direction is not None:
+ params['direction'] = direction
+
+ request = Request(
+ method='get',
+ endpoint='/_api/edges/{}'.format(self.name),
+ params=params
+ )
+
+ def response_handler(resp):
+ if not resp.is_success:
+ raise EdgeListError(resp, request)
+ stats = resp.body['stats']
+ return {
+ 'edges': resp.body['edges'],
+ 'stats': {
+ 'filtered': stats['filtered'],
+ 'scanned_index': stats['scannedIndex'],
+ }
+ }
+
+ return self._execute(request, response_handler)
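+
+ # Usage sketch for edges() (hypothetical names; the result carries the
+ # matching edge documents plus the normalized statistics built above):
+ #
+ #   result = teach.edges('teachers/1', direction='out')
+ #   for edge in result['edges']:
+ #       print(edge['_to'])
+ #   print(result['stats']['scanned_index'])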
diff --git a/arango/collections/__init__.py b/arango/collections/__init__.py
deleted file mode 100644
index 2ce6a202..00000000
--- a/arango/collections/__init__.py
+++ /dev/null
@@ -1,3 +0,0 @@
-from arango.collections.standard import Collection
-from arango.collections.vertex import VertexCollection
-from arango.collections.edge import EdgeCollection
diff --git a/arango/collections/base.py b/arango/collections/base.py
deleted file mode 100644
index d1afb0ce..00000000
--- a/arango/collections/base.py
+++ /dev/null
@@ -1,1262 +0,0 @@
-from __future__ import absolute_import, unicode_literals
-
-from arango.api import APIWrapper, api_method
-from arango.cursor import Cursor, ExportCursor
-from arango.exceptions import *
-from arango.request import Request
-from arango.utils import HTTP_OK
-
-
-class BaseCollection(APIWrapper):
- """Base ArangoDB collection.
-
- :param connection: ArangoDB connection object
- :type connection: arango.connection.Connection
- :param name: the name of the collection
- :type name: str | unicode
- """
-
- TYPES = {
- 2: 'document',
- 3: 'edge'
- }
-
- STATUSES = {
- 1: 'new',
- 2: 'unloaded',
- 3: 'loaded',
- 4: 'unloading',
- 5: 'deleted',
- 6: 'loading'
- }
-
- def __init__(self, connection, name):
- self._conn = connection
- self._name = name
-
- def __iter__(self):
- """Iterate through the documents in the collection.
-
- :returns: the document cursor
- :rtype: arango.cursor.Cursor
- :raises arango.exceptions.DocumentGetError: if the documents cannot
- be fetched from the collection
- """
- res = self._conn.put(
- endpoint='/_api/simple/all',
- data={'collection': self._name}
- )
- if res.status_code not in HTTP_OK:
- raise DocumentGetError(res)
- return Cursor(self._conn, res.body)
-
- def __len__(self):
- """Return the number of documents in the collection.
-
- :returns: the number of documents
- :rtype: int
- :raises arango.exceptions.DocumentCountError: if the document
- count cannot be retrieved
- """
- res = self._conn.get('/_api/collection/{}/count'.format(self._name))
- if res.status_code not in HTTP_OK:
- raise DocumentCountError(res)
- return res.body['count']
-
- def __getitem__(self, key):
- """Return a document by its key from the collection.
-
- :param key: the document key
- :type key: str | unicode
- :returns: the document
- :rtype: dict
- :raises arango.exceptions.DocumentGetError: if the document cannot
- be fetched from the collection
- """
- res = self._conn.get('/_api/document/{}/{}'.format(self._name, key))
- if res.status_code == 404 and res.error_code == 1202:
- return None
- elif res.status_code not in HTTP_OK:
- raise DocumentGetError(res)
- return res.body
-
- def __contains__(self, key):
- """Check if a document exists in the collection by its key.
-
- :param key: the document key
- :type key: dict | str | unicode
- :returns: whether the document exists
- :rtype: bool
- :raises arango.exceptions.DocumentInError: if the check cannot
- be executed
- """
- res = self._conn.get('/_api/document/{}/{}'.format(self._name, key))
- if res.status_code == 404 and res.error_code == 1202:
- return False
- elif res.status_code in HTTP_OK:
- return True
- raise DocumentInError(res)
-
- def _status(self, code):
- """Return the collection status text.
-
- :param code: the collection status code
- :type code: int
- :returns: the collection status text or ``None``
- :rtype: str | unicode | None
- :raises arango.exceptions.CollectionBadStatusError: if the collection
- status code is unknown
- """
- if code is None: # pragma: no cover
- return None
- try:
- return self.STATUSES[code]
- except KeyError:
- raise CollectionBadStatusError(
- 'Unknown status code {}'.format(code)
- )
-
- @property
- def name(self):
- """Return the name of the collection.
-
- :returns: the name of the collection
- :rtype: str | unicode
- """
- return self._name
-
- @property
- def database(self):
- """Return the name of the database the collection belongs to.
-
- :returns: The name of the database.
- :rtype: str | unicode
- """
- return self._conn.database
-
- @api_method
- def rename(self, new_name):
- """Rename the collection.
-
- :param new_name: the new name for the collection
- :type new_name: str | unicode
- :returns: the new collection details
- :rtype: dict
- :raises arango.exceptions.CollectionRenameError: if the collection
- name cannot be changed
- """
- request = Request(
- method='put',
- endpoint='/_api/collection/{}/rename'.format(self._name),
- data={'name': new_name}
- )
-
- def handler(res):
- if res.status_code not in HTTP_OK:
- raise CollectionRenameError(res)
- self._name = new_name
- return {
- 'id': res.body['id'],
- 'is_system': res.body['isSystem'],
- 'name': res.body['name'],
- 'status': self._status(res.body['status']),
- 'type': self.TYPES[res.body['type']]
- }
-
- return request, handler
-
- @api_method
- def statistics(self):
- """Return the collection statistics.
-
- :returns: the collection statistics
- :rtype: dict
- :raises arango.exceptions.CollectionStatisticsError: if the
- collection statistics cannot be retrieved
- """
- request = Request(
- method='get',
- endpoint='/_api/collection/{}/figures'.format(self._name)
- )
-
- def handler(res):
- if res.status_code not in HTTP_OK:
- raise CollectionStatisticsError(res)
- stats = res.body['figures']
- stats['compaction_status'] = stats.pop('compactionStatus', None)
- stats['document_refs'] = stats.pop('documentReferences', None)
- stats['last_tick'] = stats.pop('lastTick', None)
- stats['waiting_for'] = stats.pop('waitingFor', None)
- stats['uncollected_logfile_entries'] = stats.pop(
- 'uncollectedLogfileEntries', None
- )
- return stats
-
- return request, handler
-
- @api_method
- def revision(self):
- """Return the collection revision.
-
- :returns: the collection revision
- :rtype: str | unicode
- :raises arango.exceptions.CollectionRevisionError: if the
- collection revision cannot be retrieved
- """
- request = Request(
- method='get',
- endpoint='/_api/collection/{}/revision'.format(self._name)
- )
-
- def handler(res):
- if res.status_code not in HTTP_OK:
- raise CollectionRevisionError(res)
- return res.body['revision']
-
- return request, handler
-
- @api_method
- def properties(self):
- """Return the collection properties.
-
- :returns: The collection properties.
- :rtype: dict
- :raises arango.exceptions.CollectionPropertiesError: If the
- collection properties cannot be retrieved.
- """
- request = Request(
- method='get',
- endpoint='/_api/collection/{}/properties'.format(self._name)
- )
-
- def handler(res):
- if res.status_code not in HTTP_OK:
- raise CollectionPropertiesError(res)
-
- key_options = res.body.get('keyOptions', {})
-
- return {
- 'id': res.body.get('id'),
- 'name': res.body.get('name'),
- 'edge': res.body.get('type') == 3,
- 'sync': res.body.get('waitForSync'),
- 'status': self._status(res.body.get('status')),
- 'compact': res.body.get('doCompact'),
- 'system': res.body.get('isSystem'),
- 'volatile': res.body.get('isVolatile'),
- 'journal_size': res.body.get('journalSize'),
- 'keygen': key_options.get('type'),
- 'user_keys': key_options.get('allowUserKeys'),
- 'key_increment': key_options.get('increment'),
- 'key_offset': key_options.get('offset')
- }
-
- return request, handler
-
- @api_method
- def configure(self, sync=None, journal_size=None):
- """Configure the collection properties.
-
- Only *sync* and *journal_size* properties are configurable.
-
- :param sync: Wait for the operation to sync to disk.
- :type sync: bool
- :param journal_size: The journal size.
- :type journal_size: int
- :returns: the new collection properties
- :rtype: dict
- :raises arango.exceptions.CollectionConfigureError: if the
- collection properties cannot be configured
- """
- data = {}
- if sync is not None:
- data['waitForSync'] = sync
- if journal_size is not None:
- data['journalSize'] = journal_size
-
- request = Request(
- method='put',
- endpoint='/_api/collection/{}/properties'.format(self._name),
- data=data
- )
-
- def handler(res):
- if res.status_code not in HTTP_OK:
- raise CollectionConfigureError(res)
-
- key_options = res.body.get('keyOptions', {})
-
- return {
- 'id': res.body.get('id'),
- 'name': res.body.get('name'),
- 'edge': res.body.get('type') == 3,
- 'sync': res.body.get('waitForSync'),
- 'status': self._status(res.body.get('status')),
- 'compact': res.body.get('doCompact'),
- 'system': res.body.get('isSystem'),
- 'volatile': res.body.get('isVolatile'),
- 'journal_size': res.body.get('journalSize'),
- 'keygen': key_options.get('type'),
- 'user_keys': key_options.get('allowUserKeys'),
- 'key_increment': key_options.get('increment'),
- 'key_offset': key_options.get('offset')
- }
-
- return request, handler
-
- @api_method
- def load(self):
- """Load the collection into memory.
-
- :returns: the collection status
- :rtype: str | unicode
- :raises arango.exceptions.CollectionLoadError: if the collection
- cannot be loaded into memory
- """
- request = Request(
- method='put',
- endpoint='/_api/collection/{}/load'.format(self._name),
- command='db.{}.load()'.format(self._name)
- )
-
- def handler(res):
- if res.status_code not in HTTP_OK:
- raise CollectionLoadError(res)
- return self._status(res.body['status'])
-
- return request, handler
-
- @api_method
- def unload(self):
- """Unload the collection from memory.
-
- :returns: the collection status
- :rtype: str | unicode
- :raises arango.exceptions.CollectionUnloadError: if the collection
- cannot be unloaded from memory
- """
- request = Request(
- method='put',
- endpoint='/_api/collection/{}/unload'.format(self._name),
- command='db.{}.unload()'.format(self._name)
- )
-
- def handler(res):
- if res.status_code not in HTTP_OK:
- raise CollectionUnloadError(res)
- return self._status(res.body['status'])
-
- return request, handler
-
- @api_method
- def rotate(self):
- """Rotate the collection journal.
-
- :returns: the result of the operation
- :rtype: dict
- :raises arango.exceptions.CollectionRotateJournalError: if the
- collection journal cannot be rotated
- """
- request = Request(
- method='put',
- endpoint='/_api/collection/{}/rotate'.format(self._name),
- command='db.{}.rotate()'.format(self._name)
- )
-
- def handler(res):
- if res.status_code not in HTTP_OK:
- raise CollectionRotateJournalError(res)
- return res.body['result'] # pragma: no cover
-
- return request, handler
-
- @api_method
- def checksum(self, with_rev=False, with_data=False):
- """Return the collection checksum.
-
- :param with_rev: include the document revisions in the checksum
- calculation
- :type with_rev: bool
- :param with_data: include the document data in the checksum
- calculation
- :type with_data: bool
- :returns: the collection checksum
- :rtype: int
- :raises arango.exceptions.CollectionChecksumError: if the
- collection checksum cannot be retrieved
- """
- request = Request(
- method='get',
- endpoint='/_api/collection/{}/checksum'.format(self._name),
- params={'withRevision': with_rev, 'withData': with_data}
- )
-
- def handler(res):
- if res.status_code not in HTTP_OK:
- raise CollectionChecksumError(res)
- return int(res.body['checksum'])
-
- return request, handler
-
- @api_method
- def truncate(self):
- """Truncate the collection.
-
- :returns: the collection details
- :rtype: dict
- :raises arango.exceptions.CollectionTruncateError: if the collection
- cannot be truncated
- """
- request = Request(
- method='put',
- endpoint='/_api/collection/{}/truncate'.format(self._name),
- command='db.{}.truncate()'.format(self._name)
- )
-
- def handler(res):
- if res.status_code not in HTTP_OK:
- raise CollectionTruncateError(res)
- return {
- 'id': res.body['id'],
- 'is_system': res.body['isSystem'],
- 'name': res.body['name'],
- 'status': self._status(res.body['status']),
- 'type': self.TYPES[res.body['type']]
- }
-
- return request, handler
-
- @api_method
- def count(self):
- """Return the number of documents in the collection.
-
- :returns: the number of documents
- :rtype: int
- :raises arango.exceptions.DocumentCountError: if the document
- count cannot be retrieved
- """
- request = Request(
- method='get',
- endpoint='/_api/collection/{}/count'.format(self._name)
- )
-
- def handler(res):
- if res.status_code not in HTTP_OK:
- raise DocumentCountError(res)
- return res.body['count']
-
- return request, handler
-
- @api_method
- def has(self, key, rev=None, match_rev=True):
- """Check if a document exists in the collection by its key.
-
- :param key: the document key
- :type key: dict | str | unicode
- :param rev: the document revision to be compared against the revision
- of the target document
- :type rev: str | unicode
- :param match_rev: if ``True``, check if the given revision and
- the target document's revisions are the same, otherwise check if
- the revisions are different (this flag has an effect only when
- **rev** is given)
- :type match_rev: bool
- :returns: whether the document exists
- :rtype: bool
- :raises arango.exceptions.DocumentRevisionError: if the given revision
- does not match the revision of the retrieved document
- :raises arango.exceptions.DocumentInError: if the check cannot
- be executed
- """
- request = Request(
- method='get', # TODO async seems to freeze when using 'head'
- endpoint='/_api/document/{}/{}'.format(self._name, key),
- headers=(
- {'If-Match' if match_rev else 'If-None-Match': rev}
- if rev is not None else {}
- )
- )
-
- def handler(res):
- if res.status_code == 404 and res.error_code == 1202:
- return False
- elif res.status_code in HTTP_OK:
- return True
- raise DocumentInError(res)
-
- return request, handler
-
- @api_method
- def all(self,
- skip=None,
- limit=None):
- """Return all documents in the collection using a server cursor.
-
- :param skip: the number of documents to skip
- :type skip: int
- :param limit: the max number of documents fetched by the cursor
- :type limit: int
- :returns: the document cursor
- :rtype: arango.cursor.Cursor
- :raises arango.exceptions.DocumentGetError: if the documents in
- the collection cannot be retrieved
- """
-
- data = {'collection': self._name}
- if skip is not None:
- data['skip'] = skip
- if limit is not None:
- data['limit'] = limit
-
- request = Request(
- method='put',
- endpoint='/_api/simple/all',
- data=data
- )
-
- def handler(res):
- if res.status_code not in HTTP_OK:
- raise DocumentGetError(res)
- return Cursor(self._conn, res.body)
-
- return request, handler
-
- @api_method
- def export(self,
- limit=None,
- count=False,
- batch_size=None,
- flush=None,
- flush_wait=None,
- ttl=None,
- filter_fields=None,
- filter_type='include'): # pragma: no cover
- """"Export all documents in the collection using a server cursor.
-
- :param flush: flush the WAL prior to the export
- :type flush: bool
- :param flush_wait: the max wait time in seconds for the WAL flush
- :type flush_wait: int
- :param count: include the document count in the server cursor
- (default: ``False``)
- :type count: bool
- :param batch_size: the max number of documents in the batch fetched by
- the cursor in one round trip
- :type batch_size: int
- :param limit: the max number of documents fetched by the cursor
- :type limit: int
- :param ttl: time-to-live for the cursor on the server
- :type ttl: int
- :param filter_fields: list of document fields to filter by
- :type filter_fields: list
- :param filter_type: ``"include"`` (default) or ``"exclude"``
- :type filter_type: str | unicode
- :returns: the document export cursor
- :rtype: arango.cursor.ExportCursor
- :raises arango.exceptions.DocumentGetError: if the documents in
- the collection cannot be exported
-
- .. note::
- If **flush** is not set to ``True``, documents in the WAL at the
- time of retrieval are *not* included by the server cursor
- """
- data = {'count': count}
- if flush is not None: # pragma: no cover
- data['flush'] = flush
- if flush_wait is not None: # pragma: no cover
- data['flushWait'] = flush_wait
- if batch_size is not None:
- data['batchSize'] = batch_size
- if limit is not None:
- data['limit'] = limit
- if ttl is not None:
- data['ttl'] = ttl
- if filter_fields is not None:
- data['restrict'] = {
- 'fields': filter_fields,
- 'type': filter_type
- }
- request = Request(
- method='post',
- endpoint='/_api/export',
- params={'collection': self._name},
- data=data
- )
-
- def handler(res):
- if res.status_code not in HTTP_OK:
- raise DocumentGetError(res)
- return ExportCursor(self._conn, res.body)
-
- return request, handler
-
- @api_method
- def find(self, filters, offset=None, limit=None):
- """Return all documents that match the given filters.
-
- :param filters: the document filters
- :type filters: dict
- :param offset: the number of documents to skip initially
- :type offset: int
- :param limit: the max number of documents to return
- :type limit: int
- :returns: the document cursor
- :rtype: arango.cursor.Cursor
- :raises arango.exceptions.DocumentGetError: if the document
- cannot be fetched from the collection
- """
- data = {'collection': self._name, 'example': filters}
- if offset is not None:
- data['skip'] = offset
- if limit is not None:
- data['limit'] = limit
-
- request = Request(
- method='put',
- endpoint='/_api/simple/by-example',
- data=data
- )
-
- def handler(res):
- if res.status_code not in HTTP_OK:
- raise DocumentGetError(res)
- return Cursor(self._conn, res.body)
-
- return request, handler
-
- @api_method
- def get_many(self, keys):
- """Return multiple documents by their keys.
-
- :param keys: the list of document keys
- :type keys: list
- :returns: the list of documents
- :rtype: list
- :raises arango.exceptions.DocumentGetError: if the documents
- cannot be fetched from the collection
- """
- request = Request(
- method='put',
- endpoint='/_api/simple/lookup-by-keys',
- data={'collection': self._name, 'keys': keys}
- )
-
- def handler(res):
- if res.status_code not in HTTP_OK:
- raise DocumentGetError(res)
- return res.body['documents']
-
- return request, handler
-
- @api_method
- def random(self):
- """Return a random document from the collection.
-
- :returns: a random document
- :rtype: dict
- :raises arango.exceptions.DocumentGetError: if the document cannot
- be fetched from the collection
- """
- request = Request(
- method='put',
- endpoint='/_api/simple/any',
- data={'collection': self._name}
- )
-
- def handler(res):
- if res.status_code not in HTTP_OK:
- raise DocumentGetError(res)
- return res.body['document']
-
- return request, handler
-
- @api_method
- def find_near(self, latitude, longitude, limit=None):
- """Return documents near a given coordinate.
-
- By default, at most 100 documents near the coordinate are returned.
- Documents returned are sorted according to distance, with the nearest
- document being the first. If there are documents of equal distance,
- they are randomly chosen from the set until the limit is reached.
-
- :param latitude: the latitude
- :type latitude: int
- :param longitude: the longitude
- :type longitude: int
- :param limit: the max number of documents to return
- :type limit: int
- :returns: the document cursor
- :rtype: arango.cursor.Cursor
- :raises arango.exceptions.DocumentGetError: if the documents
- cannot be fetched from the collection
-
- .. note::
- A geo index must be defined in the collection for this method to
- be used
- """
- full_query = """
- FOR doc IN NEAR(@collection, @latitude, @longitude{})
- RETURN doc
- """.format(', @limit' if limit is not None else '')
-
- bind_vars = {
- 'collection': self._name,
- 'latitude': latitude,
- 'longitude': longitude
- }
- if limit is not None:
- bind_vars['limit'] = limit
-
- request = Request(
- method='post',
- endpoint='/_api/cursor',
- data={'query': full_query, 'bindVars': bind_vars}
- )
-
- def handler(res):
- if res.status_code not in HTTP_OK:
- raise DocumentGetError(res)
- return Cursor(self._conn, res.body)
-
- return request, handler
-
- @api_method
- def find_in_range(self,
- field,
- lower,
- upper,
- offset=0,
- limit=100,
- inclusive=True):
- """Return documents within a given range in a random order.
-
- :param field: the name of the field to use
- :type field: str | unicode
- :param lower: the lower bound
- :type lower: int
- :param upper: the upper bound
- :type upper: int
- :param offset: the number of documents to skip
- :type offset: int
- :param limit: the max number of documents to return
- :type limit: int
- :param inclusive: include the lower and upper bounds
- :type inclusive: bool
- :returns: the document cursor
- :rtype: arango.cursor.Cursor
- :raises arango.exceptions.DocumentGetError: if the documents
- cannot be fetched from the collection
-
- .. note::
- A skiplist index on the **field** is recommended for this query to
- perform well
- """
- if inclusive:
- full_query = """
- FOR doc IN @@collection
- FILTER doc.@field >= @lower && doc.@field <= @upper
- LIMIT @skip, @limit
- RETURN doc
- """
- else:
- full_query = """
- FOR doc IN @@collection
- FILTER doc.@field > @lower && doc.@field < @upper
- LIMIT @skip, @limit
- RETURN doc
- """
- bind_vars = {
- '@collection': self._name,
- 'field': field,
- 'lower': lower,
- 'upper': upper,
- 'skip': offset,
- 'limit': limit
- }
-
- request = Request(
- method='post',
- endpoint='/_api/cursor',
- data={'query': full_query, 'bindVars': bind_vars}
- )
-
- def handler(res):
- if res.status_code not in HTTP_OK:
- raise DocumentGetError(res)
- return Cursor(self._conn, res.body)
-
- return request, handler
-
- # TODO the WITHIN geo function does not seem to work properly
- @api_method
- def find_in_radius(self, latitude, longitude, radius, distance_field=None):
- """Return documents within a given radius in a random order.
-
- :param latitude: the latitude
- :type latitude: int
- :param longitude: the longitude
- :type longitude: int
- :param radius: the maximum radius
- :type radius: int
- :param distance_field: the key containing the distance
- :type distance_field: str | unicode
- :returns: the document cursor
- :rtype: arango.cursor.Cursor
- :raises arango.exceptions.DocumentGetError: if the documents
- cannot be fetched from the collection
-
- .. note::
- A geo index must be defined in the collection for this method to
- be used
- """
- full_query = """
- FOR doc IN WITHIN(@collection, @latitude, @longitude, @radius{})
- RETURN doc
- """.format(', @distance' if distance_field is not None else '')
-
- bind_vars = {
- 'collection': self._name,
- 'latitude': latitude,
- 'longitude': longitude,
- 'radius': radius
- }
- if distance_field is not None:
- bind_vars['distance'] = distance_field
-
- request = Request(
- method='post',
- endpoint='/_api/cursor',
- data={'query': full_query, 'bindVars': bind_vars}
- )
-
- def handler(res):
- if res.status_code not in HTTP_OK:
- raise DocumentGetError(res)
- return Cursor(self._conn, res.body)
-
- return request, handler
-
- @api_method
- def find_in_box(self,
- latitude1,
- longitude1,
- latitude2,
- longitude2,
- skip=None,
- limit=None,
- geo_field=None):
- """Return all documents in an rectangular area.
-
- :param latitude1: the first latitude
- :type latitude1: int
- :param longitude1: the first longitude
- :type longitude1: int
- :param latitude2: the second latitude
- :type latitude2: int
- :param longitude2: the second longitude
- :type longitude2: int
- :param skip: the number of documents to skip
- :type skip: int
- :param limit: the max number of documents to return (if 0 is given all
- documents are returned)
- :type limit: int
- :param geo_field: the field to use for geo index
- :type geo_field: str | unicode
- :returns: the document cursor
- :rtype: arango.cursor.Cursor
- :raises arango.exceptions.DocumentGetError: if the documents
- cannot be fetched from the collection
- """
- data = {
- 'collection': self._name,
- 'latitude1': latitude1,
- 'longitude1': longitude1,
- 'latitude2': latitude2,
- 'longitude2': longitude2,
- }
- if skip is not None:
- data['skip'] = skip
- if limit is not None:
- data['limit'] = limit
- if geo_field is not None:
- data['geo'] = '/'.join([self._name, geo_field])
-
- request = Request(
- method='put',
- endpoint='/_api/simple/within-rectangle',
- data=data
- )
-
- def handler(res):
- if res.status_code not in HTTP_OK:
- raise DocumentGetError(res)
- return Cursor(self._conn, res.body)
-
- return request, handler
-
- @api_method
- def find_by_text(self, key, query, limit=None):
- """Return documents that match the specified fulltext **query**.
-
- :param key: the key with a fulltext index
- :type key: str | unicode
- :param query: the fulltext query
- :type query: str | unicode
- :param limit: the max number of documents to return
- :type limit: int
- :returns: the document cursor
- :rtype: arango.cursor.Cursor
- :raises arango.exceptions.DocumentGetError: if the documents
- cannot be fetched from the collection
- """
- full_query = """
- FOR doc IN FULLTEXT(@collection, @field, @query{})
- RETURN doc
- """.format(', @limit' if limit is not None else '')
-
- bind_vars = {
- 'collection': self._name,
- 'field': key,
- 'query': query
- }
- if limit is not None:
- bind_vars['limit'] = limit
-
- request = Request(
- method='post',
- endpoint='/_api/cursor',
- data={'query': full_query, 'bindVars': bind_vars}
- )
-
- def handler(res):
- if res.status_code not in HTTP_OK:
- raise DocumentGetError(res)
- return Cursor(self._conn, res.body)
-
- return request, handler
-
- @api_method
- def indexes(self):
- """Return the collection indexes.
-
- :returns: the collection indexes
- :rtype: [dict]
- :raises arango.exceptions.IndexListError: if the list of indexes
- cannot be retrieved
- """
- request = Request(
- method='get',
- endpoint='/_api/index',
- params={'collection': self._name}
- )
-
- def handler(res):
- if res.status_code not in HTTP_OK:
- raise IndexListError(res)
-
- indexes = []
- for index in res.body['indexes']:
- index['id'] = index['id'].split('/', 1)[1]
- if 'minLength' in index:
- index['min_length'] = index.pop('minLength')
- if 'geoJson' in index:
- index['geo_json'] = index.pop('geoJson')
- if 'ignoreNull' in index:
- index['ignore_none'] = index.pop('ignoreNull')
- if 'selectivityEstimate' in index:
- index['selectivity'] = index.pop('selectivityEstimate')
- indexes.append(index)
- return indexes
-
- return request, handler
-
- def _add_index(self, data):
- """Helper method for creating a new index."""
- request = Request(
- method='post',
- endpoint='/_api/index',
- data=data,
- params={'collection': self._name}
- )
-
- def handler(res):
- if res.status_code not in HTTP_OK:
- raise IndexCreateError(res)
- details = res.body
- details['id'] = details['id'].split('/', 1)[1]
- details.pop('error', None)
- details.pop('code', None)
- if 'minLength' in details:
- details['min_length'] = details.pop('minLength')
- if 'geoJson' in details:
- details['geo_json'] = details.pop('geoJson')
- if 'ignoreNull' in details:
- details['ignore_none'] = details.pop('ignoreNull')
- if 'selectivityEstimate' in details:
- details['selectivity'] = details.pop('selectivityEstimate')
- if 'isNewlyCreated' in details:
- details['new'] = details.pop('isNewlyCreated')
- return details
-
- return request, handler
-
- @api_method
- def add_hash_index(self,
- fields,
- unique=None,
- sparse=None,
- deduplicate=None):
- """Create a new hash index in the collection.
-
- :param fields: the document fields to index
- :type fields: list
- :param unique: whether the index is unique
- :type unique: bool
- :param sparse: exclude documents that do not contain at least one of
- the indexed fields, or that have a value of ``None`` in any of the
- indexed fields
- :type sparse: bool
- :param deduplicate: Controls whether inserting duplicate index values
- from the same document into a unique array index leads to a unique
- constraint error or not. If set to ``True`` (default), only a
- single instance of each non-unique index value is inserted into
- the index per document. Trying to insert a value into the index
- that already exists will always fail, regardless of the value of
- this field.
- :type deduplicate: bool
- :returns: the details on the new index
- :rtype: dict
- :raises arango.exceptions.IndexCreateError: if the hash index cannot
- be created in the collection
- """
- data = {'type': 'hash', 'fields': fields}
- if unique is not None:
- data['unique'] = unique
- if sparse is not None:
- data['sparse'] = sparse
- if deduplicate is not None:
- data['deduplicate'] = deduplicate
- return self._add_index(data)
-
- @api_method
- def add_skiplist_index(self,
- fields,
- unique=None,
- sparse=None,
- deduplicate=None):
- """Create a new skiplist index in the collection.
-
- A skiplist index is used to find the ranges of documents (e.g. time).
-
- :param fields: the document fields to index
- :type fields: list
- :param unique: whether the index is unique
- :type unique: bool
- :param sparse: exclude documents that do not contain at least one of
- the indexed fields, or that have a value of ``None`` in any of the
- indexed fields
- :type sparse: bool
- :param deduplicate: Controls whether inserting duplicate index values
- from the same document into a unique array index leads to a unique
- constraint error or not. If set to ``True`` (default), only a
- single instance of each non-unique index value is inserted into
- the index per document. Trying to insert a value into the index
- that already exists will always fail, regardless of the value of
- this field.
- :type deduplicate: bool
- :returns: the details on the new index
- :rtype: dict
- :raises arango.exceptions.IndexCreateError: if the skiplist index
- cannot be created in the collection
- """
- data = {'type': 'skiplist', 'fields': fields}
- if unique is not None:
- data['unique'] = unique
- if sparse is not None:
- data['sparse'] = sparse
- if deduplicate is not None:
- data['deduplicate'] = deduplicate
- return self._add_index(data)
-
- @api_method
- def add_geo_index(self, fields, ordered=None):
- """Create a geo-spatial index in the collection.
-
- :param fields: if given a single field, the index is created using its
- value (which must be a list with at least two floats), and if given a
- list of two fields, the index is created using the values of both;
- documents without the fields or with invalid values are ignored.
- :type fields: list
- :param ordered: whether the order is longitude -> latitude
- :type ordered: bool
- :returns: the details on the new index
- :rtype: dict
- :raises arango.exceptions.IndexCreateError: if the geo-spatial index
- cannot be created in the collection
- """
- data = {'type': 'geo', 'fields': fields}
- if ordered is not None:
- data['geoJson'] = ordered
- return self._add_index(data)
-
- @api_method
- def add_fulltext_index(self, fields, min_length=None):
- """Create a fulltext index in the collection.
-
- A fulltext index is used to find words or prefixes of words. Only words
- with textual values of minimum length are indexed. Word tokenization is
- done using the word boundary analysis provided by libicu, which uses
- the language selected during server startup. Words are indexed in their
- lower-cased form. The index supports complete match and prefix queries.
-
- :param fields: the fields to index (only one field is currently
- supported)
- :type fields: list
- :param min_length: the minimum number of characters to index
- :type min_length: int
- :returns: the details on the new index
- :rtype: dict
- :raises arango.exceptions.IndexCreateError: if the fulltext index
- cannot be created in the collection
- """
- # TODO keep an eye on this for future ArangoDB releases
- if len(fields) > 1:
- raise IndexCreateError('Only one field is currently supported')
-
- data = {'type': 'fulltext', 'fields': fields}
- if min_length is not None:
- data['minLength'] = min_length
- return self._add_index(data)
-
- @api_method
- def add_persistent_index(self, fields, unique=None, sparse=None):
- """Create a persistent index in the collection.
-
- :param fields: the fields to index
- :type fields: list
- :param unique: whether the index is unique
- :type unique: bool
- :param sparse: exclude documents that do not contain at least one of
- the indexed fields or that have a value of ``None`` in any of the
- indexed fields
- :type sparse: bool
- :returns: the details on the new index
- :rtype: dict
- :raises arango.exceptions.IndexCreateError: if the persistent index
- cannot be created in the collection
-
- .. note::
- Unique persistent indexes on non-sharded keys are not supported
- in a cluster
- """
- data = {'type': 'persistent', 'fields': fields}
- if unique is not None:
- data['unique'] = unique
- if sparse is not None:
- data['sparse'] = sparse
- return self._add_index(data)
-
- @api_method
- def delete_index(self, index_id, ignore_missing=False):
- """Delete an index from the collection.
-
- :param index_id: the ID of the index to delete
- :type index_id: str | unicode
- :param ignore_missing: ignore missing indexes
- :type ignore_missing: bool
- :returns: whether the index was deleted successfully
- :rtype: bool
- :raises arango.exceptions.IndexDeleteError: if the specified index
- cannot be deleted from the collection
- """
- request = Request(
- method='delete',
- endpoint='/_api/index/{}/{}'.format(self._name, index_id)
- )
-
- def handler(res):
- if res.status_code == 404 and res.error_code == 1212:
- if ignore_missing:
- return False
- raise IndexDeleteError(res)
- if res.status_code not in HTTP_OK:
- raise IndexDeleteError(res)
- return not res.body['error']
-
- return request, handler
-
- @api_method
- def user_access(self, username):
- """Return a user's access details for the collection.
-
- Appropriate permissions are required in order to execute this method.
-
- :param username: The name of the user.
- :type username: str | unicode
- :returns: The access details (e.g. ``"rw"``, ``None``)
- :rtype: str | unicode | None
- :raises arango.exceptions.UserAccessError: If the retrieval fails.
- """
- request = Request(
- method='get',
- endpoint='/_api/user/{}/database/{}/{}'.format(
- username, self.database, self.name
- )
- )
-
- def handler(res):
- if res.status_code in HTTP_OK:
- result = res.body['result'].lower()
- return None if result == 'none' else result
- raise UserAccessError(res)
-
- return request, handler
-
- @api_method
- def grant_user_access(self, username):
- """Grant user access to the collection.
-
- Appropriate permissions are required in order to execute this method.
-
- :param username: The name of the user.
- :type username: str | unicode
- :returns: Whether the operation was successful or not.
- :rtype: bool
- :raises arango.exceptions.UserGrantAccessError: If the operation fails.
- """
- request = Request(
- method='put',
- endpoint='/_api/user/{}/database/{}/{}'.format(
- username, self.database, self.name
- ),
- data={'grant': 'rw'}
- )
-
- def handler(res):
- if res.status_code in HTTP_OK:
- return True
- raise UserGrantAccessError(res)
-
- return request, handler
-
- @api_method
- def revoke_user_access(self, username):
- """Revoke user access to the collection.
-
- Appropriate permissions are required in order to execute this method.
-
- :param username: The name of the user.
- :type username: str | unicode
- :returns: Whether the operation was successful or not.
- :rtype: bool
- :raises arango.exceptions.UserRevokeAccessError: If the operation fails.
- """
- request = Request(
- method='delete',
- endpoint='/_api/user/{}/database/{}/{}'.format(
- username, self.database, self.name
- )
- )
-
- def handler(res):
- if res.status_code in HTTP_OK:
- return True
- raise UserRevokeAccessError(res)
-
- return request, handler
diff --git a/arango/collections/edge.py b/arango/collections/edge.py
deleted file mode 100644
index 8e9d753c..00000000
--- a/arango/collections/edge.py
+++ /dev/null
@@ -1,269 +0,0 @@
-from __future__ import absolute_import, unicode_literals
-
-from arango.api import api_method
-from arango.collections.base import BaseCollection
-from arango.exceptions import *
-from arango.request import Request
-from arango.utils import HTTP_OK
-
-
-class EdgeCollection(BaseCollection):
- """ArangoDB edge collection.
-
- An edge collection consists of edge documents. It is uniquely identified
- by its name, which must consist only of alphanumeric characters, hyphen
- and the underscore characters. Edge collections share their namespace with
- other types of collections.
-
- The documents in an edge collection are fully accessible from a standard
- collection. Managing documents through an edge collection, however, adds
- additional guarantees: all modifications are executed in transactions and
- edge documents are checked against the edge definitions on insert.
-
- :param connection: ArangoDB database connection
- :type connection: arango.connection.Connection
- :param graph_name: the name of the graph
- :type graph_name: str | unicode
- :param name: the name of the edge collection
- :type name: str | unicode
- """
-
- def __init__(self, connection, graph_name, name):
- super(EdgeCollection, self).__init__(connection, name)
- self._graph_name = graph_name
-
- def __repr__(self):
- return (
- '<ArangoDB edge collection "{}" in graph "{}">'
- .format(self._name, self._graph_name)
- )
-
- @property
- def graph_name(self):
- """Return the name of the graph.
- :returns: the name of the graph
- :rtype: str | unicode
- """
- return self._graph_name
-
- @api_method
- def get(self, key, rev=None):
- """Fetch a document by key from the edge collection.
-
- :param key: the document key
- :type key: str | unicode
- :param rev: the document revision
- :type rev: str | unicode | None
- :returns: the vertex document or ``None`` if not found
- :rtype: dict | None
- :raises arango.exceptions.DocumentRevisionError: if the given revision
- does not match the revision of the target document
- :raises arango.exceptions.DocumentGetError: if the document cannot
- be fetched from the collection
- """
- request = Request(
- method='get',
- endpoint='/_api/gharial/{}/edge/{}/{}'.format(
- self._graph_name, self._name, key
- ),
- headers={'If-Match': rev} if rev else {}
- )
-
- def handler(res):
- if res.status_code == 412:
- raise DocumentRevisionError(res)
- elif res.status_code == 404 and res.error_code == 1202:
- return None
- elif res.status_code not in HTTP_OK:
- raise DocumentGetError(res)
- return res.body["edge"]
-
- return request, handler
-
- @api_method
- def insert(self, document, sync=None):
- """Insert a new document into the edge collection.
-
- If the ``"_key"`` field is present in **document**, its value is used
- as the key of the new document. Otherwise, the key is auto-generated.
- The **document** must contain the fields ``"_from"`` and ``"_to"``.
-
- :param document: the document body
- :type document: dict
- :param sync: wait for the operation to sync to disk
- :type sync: bool | None
- :returns: the ID, revision and key of the new document
- :rtype: dict
- :raises arango.exceptions.DocumentInsertError: if the document cannot
- be inserted into the collection
- """
- params = {}
- if sync is not None:
- params['waitForSync'] = sync
-
- request = Request(
- method='post',
- endpoint="/_api/gharial/{}/edge/{}".format(
- self._graph_name, self._name
- ),
- data=document,
- params=params
- )
-
- def handler(res):
- if res.status_code not in HTTP_OK:
- raise DocumentInsertError(res)
- return res.body["edge"]
-
- return request, handler
-
- @api_method
- def update(self, document, keep_none=True, sync=None):
- """Update a document by its key in the edge collection.
-
- The ``"_key"`` field must be present in **document**. If the ``"_rev"``
- field is present in **document**, its value is compared against the
- revision of the target document.
-
- :param document: the partial/full document with the updated values
- :type document: dict
- :param keep_none: if ``True``, the fields with value ``None``
- are retained in the document, otherwise the fields are removed
- from the document completely
- :type keep_none: bool
- :param sync: wait for the operation to sync to disk
- :type sync: bool | None
- :returns: the ID, revision and key of the updated document
- :rtype: dict
- :raises arango.exceptions.DocumentRevisionError: if the given revision
- does not match the revision of the target document
- :raises arango.exceptions.DocumentUpdateError: if the document cannot
- be updated """
- params = {'keepNull': keep_none}
- if sync is not None:
- params['waitForSync'] = sync
-
- headers = {}
- revision = document.get('_rev')
- if revision is not None:
- headers['If-Match'] = revision
-
- request = Request(
- method='patch',
- endpoint='/_api/gharial/{}/edge/{}/{}'.format(
- self._graph_name, self._name, document['_key']
- ),
- data=document,
- params=params,
- headers=headers
- )
-
- def handler(res):
- if res.status_code == 412:
- raise DocumentRevisionError(res)
- elif res.status_code not in HTTP_OK:
- raise DocumentUpdateError(res)
- edge = res.body["edge"]
- edge['_old_rev'] = edge.pop('_oldRev')
- return edge
-
- return request, handler
-
- @api_method
- def replace(self, document, sync=None):
- """Replace a document by its key in the edge collection.
-
- The ``"_key"``, ``"_from"`` and ``"_to"`` fields must be present in
- **document**. If the ``"_rev"`` field is present in **document**, its
- value is compared against the revision of the target document.
-
- :param document: the new document
- :type document: dict
- :param sync: wait for the operation to sync to disk
- :type sync: bool | None
- :returns: the ID, revision and key of the replaced document
- :rtype: dict
- :raises arango.exceptions.DocumentRevisionError: if the given revision
- does not match the revision of the target document
- :raises arango.exceptions.DocumentReplaceError: if the document cannot
- be replaced """
- headers, params = {}, {}
- if sync is not None:
- params['waitForSync'] = sync
-
- revision = document.get('_rev')
- if revision is not None:
- headers['If-Match'] = revision
-
- request = Request(
- method='put',
- endpoint='/_api/gharial/{}/edge/{}/{}'.format(
- self._graph_name, self._name, document['_key']
- ),
- data=document,
- params=params,
- headers=headers
- )
-
- def handler(res):
- if res.status_code == 412:
- raise DocumentRevisionError(res)
- elif res.status_code not in HTTP_OK:
- raise DocumentReplaceError(res)
- edge = res.body["edge"]
- edge['_old_rev'] = edge.pop('_oldRev')
- return edge
-
- return request, handler
-
- @api_method
- def delete(self, document, ignore_missing=False, sync=None):
- """Delete a document from the collection by its key.
-
- The ``"_key"`` field must be present in **document**. If the ``"_rev"``
- field is present in **document**, its value is compared against the
- revision of the target document.
-
- :param document: the document to delete
- :type document: dict
- :param sync: wait for the operation to sync to disk
- :type sync: bool | None
- :param ignore_missing: ignore missing documents
- :type ignore_missing: bool
- :returns: whether the document was deleted successfully
- :rtype: bool
- :raises arango.exceptions.DocumentRevisionError: if the given revision
- does not match the revision of the target document
- :raises arango.exceptions.DocumentDeleteError: if the document cannot
- be deleted from the collection
- """
- params = {}
- if sync is not None:
- params['waitForSync'] = sync
-
- headers = {}
- revision = document.get('_rev')
- if revision is not None:
- headers['If-Match'] = revision
-
- request = Request(
- method='delete',
- endpoint='/_api/gharial/{}/edge/{}/{}'.format(
- self._graph_name, self._name, document['_key']
- ),
- params=params,
- headers=headers
- )
-
- def handler(res):
- if res.status_code == 412:
- raise DocumentRevisionError(res)
- elif res.status_code == 404 and res.error_code == 1202:
- if ignore_missing:
- return False
- raise DocumentDeleteError(res)
- elif res.status_code not in HTTP_OK:
- raise DocumentDeleteError(res)
- return res.body['removed']
-
- return request, handler
diff --git a/arango/collections/standard.py b/arango/collections/standard.py
deleted file mode 100644
index 554617e9..00000000
--- a/arango/collections/standard.py
+++ /dev/null
@@ -1,998 +0,0 @@
-from __future__ import absolute_import, unicode_literals
-
-from json import dumps
-from six import string_types
-
-from arango.api import api_method
-from arango.collections.base import BaseCollection
-from arango.exceptions import *
-from arango.request import Request
-from arango.utils import HTTP_OK
-
-
-class Collection(BaseCollection):
- """ArangoDB (standard) collection.
-
- A collection consists of documents. It is uniquely identified by its name,
- which must consist only of alphanumeric, hyphen and underscore characters.
- There are two collection types: *document* and *edge*.
-
- By default, collections use the traditional key generator, which generates
- key values in a non-deterministic fashion. A deterministic, auto-increment
- key generator can be used as well.
-
- :param connection: ArangoDB database connection
- :type connection: arango.connection.Connection
- :param name: the name of the collection
- :type name: str | unicode
- """
-
- def __init__(self, connection, name):
- super(Collection, self).__init__(connection, name)
-
- def __repr__(self):
- return '<ArangoDB collection "{}">'.format(self._name)
-
- @api_method
- def get(self, key, rev=None, match_rev=True):
- """Retrieve a document by its key.
-
- :param key: the document key
- :type key: str | unicode
- :param rev: the revision to compare with that of the retrieved document
- :type rev: str | unicode
- :param match_rev: if ``True``, check if the given revision and
- the target document's revisions are the same, otherwise check if
- the revisions are different (this flag has an effect only when
- **rev** is given)
- :type match_rev: bool
- :returns: the document or ``None`` if the document is missing
- :rtype: dict
- :raises arango.exceptions.DocumentRevisionError: if the given revision
- does not match the revision of the retrieved document
- :raises arango.exceptions.DocumentGetError: if the document cannot
- be retrieved from the collection
- """
- request = Request(
- method='get',
- endpoint='/_api/document/{}/{}'.format(self._name, key),
- headers=(
- {'If-Match' if match_rev else 'If-None-Match': rev}
- if rev is not None else {}
- )
- )
-
- def handler(res):
- if res.status_code in {304, 412}:
- raise DocumentRevisionError(res)
- elif res.status_code == 404 and res.error_code == 1202:
- return None
- elif res.status_code in HTTP_OK:
- return res.body
- raise DocumentGetError(res)
-
- return request, handler
-
- @api_method
- def insert(self, document, return_new=False, sync=None):
- """Insert a new document into the collection.
-
- If the ``"_key"`` field is present in **document**, its value is used
- as the key of the new document. Otherwise, the key is auto-generated.
- The ``"_id"`` and ``"_rev"`` fields are ignored if present in the
- document.
-
- :param document: the document to insert
- :type document: dict
- :param return_new: if ``True``, the full body of the new
- document is included in the returned result
- :type return_new: bool
- :param sync: wait for the operation to sync to disk
- :type sync: bool
- :returns: the result of the insert (e.g. document key, revision)
- :rtype: dict
- :raises arango.exceptions.DocumentInsertError: if the document cannot
- be inserted into the collection
-
- .. note::
- Argument **return_new** has no effect in transactions
- """
- params = {'returnNew': return_new}
- if sync is not None:
- params['waitForSync'] = sync
-
- if self._conn.type != 'transaction':
- command = None
- else:
- command = 'db.{}.insert({},{})'.format(
- self._name,
- dumps(document),
- dumps(params)
- )
-
- request = Request(
- method='post',
- endpoint='/_api/document/{}'.format(self._name),
- data=document,
- params=params,
- command=command
- )
-
- def handler(res):
- if res.status_code not in HTTP_OK:
- raise DocumentInsertError(res)
- if res.status_code == 202:
- res.body['sync'] = False
- else:
- res.body['sync'] = True
- return res.body
-
- return request, handler
-
- @api_method
- def insert_many(self, documents, return_new=False, sync=None):
- """Insert multiple documents into the collection.
-
- If the ``"_key"`` fields are present in the entries in **documents**,
- their values are used as the keys of the new documents. Otherwise the
- keys are auto-generated. Any ``"_id"`` and ``"_rev"`` fields present
- in the documents are ignored.
-
- :param documents: the list of the new documents to insert
- :type documents: list
- :param return_new: if ``True``, the new bodies of the documents
- are included in the returned result
- :type return_new: bool
- :param sync: wait for the operation to sync to disk
- :type sync: bool
- :returns: the result of the insert (e.g. document keys, revisions)
- :rtype: dict
- :raises arango.exceptions.DocumentInsertError: if the documents cannot
- be inserted into the collection
-
- .. note::
- Argument **return_new** has no effect in a transaction
- """
- params = {'returnNew': return_new}
- if sync is not None:
- params['waitForSync'] = sync
-
- if self._conn.type != 'transaction':
- command = None
- else:
- command = 'db.{}.insert({},{})'.format(
- self._name,
- dumps(documents),
- dumps(params)
- )
-
- request = Request(
- method='post',
- endpoint='/_api/document/{}'.format(self._name),
- data=documents,
- params=params,
- command=command
- )
-
- def handler(res):
- if res.status_code not in HTTP_OK:
- raise DocumentInsertError(res)
-
- results = []
- for result in res.body:
- if '_id' not in result:
- result = DocumentInsertError(
- res.update_body(result)
- )
- elif res.status_code == 202:
- result['sync'] = False
- else:
- result['sync'] = True
- results.append(result)
- return results
-
- return request, handler
-
- @api_method
- def update(self,
- document,
- merge=True,
- keep_none=True,
- return_new=False,
- return_old=False,
- check_rev=False,
- sync=None):
- """Update a document by its key.
-
- :param document: the document with updates
- :type document: dict
- :param merge: if ``True``, sub-dictionaries are merged rather
- than overwritten completely
- :type merge: bool
- :param keep_none: if ``True``, the fields with value ``None`` are
- retained in the document, otherwise they are removed
- :type keep_none: bool
- :param return_new: if ``True``, the full body of the new document is
- included in the returned result
- :type return_new: bool
- :param return_old: if ``True``, the full body of the old document is
- included in the returned result
- :type return_old: bool
- :param check_rev: if ``True``, the ``"_rev"`` field in **document**
- is compared against the revision of the target document
- :type check_rev: bool
- :param sync: wait for the operation to sync to disk
- :type sync: bool
- :returns: the result of the update (e.g. document key, revision)
- :rtype: dict
- :raises arango.exceptions.DocumentRevisionError: if the given revision
- does not match the revision of the document
- :raises arango.exceptions.DocumentUpdateError: if the document cannot
- be updated
-
- .. note::
- The ``"_key"`` field must be present in **document**.
-
- .. note::
- Arguments **return_new** and **return_old** have no effect in
- transactions
- """
- params = {
- 'keepNull': keep_none,
- 'mergeObjects': merge,
- 'returnNew': return_new,
- 'returnOld': return_old,
- 'ignoreRevs': not check_rev,
- 'overwrite': not check_rev
- }
- if sync is not None:
- params['waitForSync'] = sync
-
- if self._conn.type != 'transaction':
- command = None
- else:
- if not check_rev:
- document.pop('_rev', None)
- documents_str = dumps(document)
- command = 'db.{}.update({},{},{})'.format(
- self._name,
- documents_str,
- documents_str,
- dumps(params)
- )
-
- request = Request(
- method='patch',
- endpoint='/_api/document/{}/{}'.format(
- self._name, document['_key']
- ),
- data=document,
- params=params,
- command=command
- )
-
- def handler(res):
- if res.status_code == 412:
- raise DocumentRevisionError(res)
- elif res.status_code not in HTTP_OK:
- raise DocumentUpdateError(res)
- elif res.status_code == 202:
- res.body['sync'] = False
- else:
- res.body['sync'] = True
- res.body['_old_rev'] = res.body.pop('_oldRev')
- return res.body
-
- return request, handler
-
- @api_method
- def update_many(self,
- documents,
- merge=True,
- keep_none=True,
- return_new=False,
- return_old=False,
- check_rev=False,
- sync=None):
- """Update multiple documents in the collection.
-
- :param documents: the list of documents with updates
- :type documents: list
- :param merge: if ``True``, sub-dictionaries are merged rather
- than overwritten completely
- :type merge: bool
- :param keep_none: if ``True``, the fields with value ``None`` are
- retained in the document, otherwise they are removed
- :type keep_none: bool
- :param return_new: if ``True``, the full bodies of the new documents
- are included in the returned result
- :type return_new: bool
- :param return_old: if ``True``, the full bodies of the old documents
- are included in the returned result
- :type return_old: bool
- :param check_rev: if ``True``, the ``"_rev"`` field in **document**
- is compared against the revision of the target document
- :type check_rev: bool
- :param sync: wait for the operation to sync to disk
- :type sync: bool
- :returns: the result of the update (e.g. document keys, revisions)
- :rtype: dict
- :raises arango.exceptions.DocumentRevisionError: if the given revision
- does not match the revision of the documents
- :raises arango.exceptions.DocumentUpdateError: if the documents cannot
- be updated
-
- .. note::
- The ``"_key"`` field must be present in **document**.
-
- .. note::
- Arguments **return_new** and **return_old** have no effect in
- transactions.
-
- .. warning::
- The returned details (whose size scales with the number of target
- documents) are all brought into memory.
- """
- params = {
- 'keepNull': keep_none,
- 'mergeObjects': merge,
- 'returnNew': return_new,
- 'returnOld': return_old,
- 'ignoreRevs': not check_rev,
- 'overwrite': not check_rev
- }
- if sync is not None:
- params['waitForSync'] = sync
-
- if self._conn.type != 'transaction':
- command = None
- else:
- documents_str = dumps(documents)
- command = 'db.{}.update({},{},{})'.format(
- self._name,
- documents_str,
- documents_str,
- dumps(params)
- )
-
- request = Request(
- method='patch',
- endpoint='/_api/document/{}'.format(self._name),
- data=documents,
- params=params,
- command=command
- )
-
- def handler(res):
- if res.status_code not in HTTP_OK:
- raise DocumentUpdateError(res)
-
- results = []
- for result in res.body:
- # TODO this is not clean
- if '_id' not in result:
- # An error occurred with this particular document
- err = res.update_body(result)
- # Single out revision error
- if result['errorNum'] == 1200:
- result = DocumentRevisionError(err)
- else:
- result = DocumentUpdateError(err)
- else:
- if res.status_code == 202:
- result['sync'] = False
- else:
- result['sync'] = True
- result['_old_rev'] = result.pop('_oldRev')
- results.append(result)
-
- return results
-
- return request, handler
-
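A sketch of ``update_many`` under the same assumptions; note that per-document failures are returned as exception objects in the result list rather than raised:

    results = students.update_many([
        {'_key': '235', 'age': 18},
        {'_key': '236', 'age': 21},
    ])
    for result in results:
        if isinstance(result, Exception):
            print('update failed: {}'.format(result))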
- @api_method
- def update_match(self,
- filters,
- body,
- limit=None,
- keep_none=True,
- sync=None):
- """Update matching documents in the collection.
-
- :param filters: the filters
- :type filters: dict
- :param body: the document body
- :type body: dict
- :param limit: the max number of documents to update
- :type limit: int
- :param keep_none: if ``True``, the fields with value ``None``
- are retained in the document, otherwise the fields are removed
- from the document completely
- :type keep_none: bool
- :param sync: wait for the operation to sync to disk
- :type sync: bool
- :returns: the number of documents updated
- :rtype: int
- :raises arango.exceptions.DocumentUpdateError: if the documents
- cannot be updated
- """
- data = {
- 'collection': self._name,
- 'example': filters,
- 'newValue': body,
- 'keepNull': keep_none,
- }
- if limit is not None:
- data['limit'] = limit
- if sync is not None:
- data['waitForSync'] = sync
-
- if self._conn.type != 'transaction':
- command = None
- else:
- command = 'db.{}.updateByExample({},{},{})'.format(
- self._name,
- dumps(filters),
- dumps(body),
- dumps(data)
- )
-
- request = Request(
- method='put',
- endpoint='/_api/simple/update-by-example',
- data=data,
- command=command
- )
-
- def handler(res):
- if res.status_code not in HTTP_OK:
- raise DocumentUpdateError(res)
- return res.body['updated']
-
- return request, handler
-
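A sketch of ``update_match``, which wraps the simple query API (``update-by-example``); the filter and field names are illustrative:

    # Close enrollment for up to 10 students from 2017.
    count = students.update_match(
        filters={'year': 2017},
        body={'enrolled': False},
        limit=10
    )
    print('{} document(s) updated'.format(count))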
- @api_method
- def replace(self,
- document,
- return_new=False,
- return_old=False,
- check_rev=False,
- sync=None):
- """Replace a document by its key.
-
- :param document: the new document
- :type document: dict
- :param return_new: if ``True``, the full body of the new document is
- included in the returned result
- :type return_new: bool
- :param return_old: if ``True``, the full body of the old document is
- included in the returned result
- :type return_old: bool
- :param check_rev: if ``True``, the ``"_rev"`` field in **document**
- is compared against the revision of the target document
- :type check_rev: bool
- :param sync: wait for the operation to sync to disk
- :type sync: bool
- :returns: the result of the replace (e.g. document key, revision)
- :rtype: dict
- :raises arango.exceptions.DocumentRevisionError: if the given revision
- does not match the revision of the document
- :raises arango.exceptions.DocumentReplaceError: if the document cannot
- be replaced
-
- .. note::
- The ``"_key"`` field must be present in **document**. For edge
- collections the ``"_from"`` and ``"_to"`` fields must also be
- present in **document**.
-
- .. note::
- Arguments **return_new** and **return_old** have no effect in
- transactions.
-
-
- """
- params = {
- 'returnNew': return_new,
- 'returnOld': return_old,
- 'ignoreRevs': not check_rev,
- 'overwrite': not check_rev
- }
- if sync is not None:
- params['waitForSync'] = sync
-
- if self._conn.type != 'transaction':
- command = None
- else:
- documents_str = dumps(document)
- command = 'db.{}.replace({},{},{})'.format(
- self._name,
- documents_str,
- documents_str,
- dumps(params)
- )
-
- request = Request(
- method='put',
- endpoint='/_api/document/{}/{}'.format(
- self._name, document['_key']
- ),
- params=params,
- data=document,
- command=command
- )
-
- def handler(res):
- if res.status_code == 412:
- raise DocumentRevisionError(res)
- if res.status_code not in HTTP_OK:
- raise DocumentReplaceError(res)
- if res.status_code == 202:
- res.body['sync'] = False
- else:
- res.body['sync'] = True
- res.body['_old_rev'] = res.body.pop('_oldRev')
- return res.body
-
- return request, handler
-
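A sketch of ``replace``, which (unlike ``update``) swaps the entire document body:

    result = students.replace(
        {'_key': '235', 'name': 'Emma', 'age': 19},
        return_old=True  # include the previous document body
    )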
- @api_method
- def replace_many(self,
- documents,
- return_new=False,
- return_old=False,
- check_rev=False,
- sync=None):
- """Replace multiple documents in the collection.
-
- :param documents: the list of new documents
- :type documents: list
- :param return_new: if ``True``, the full bodies of the new documents
- are included in the returned result
- :type return_new: bool
- :param return_old: if ``True``, the full bodies of the old documents
- are included in the returned result
- :type return_old: bool
- :param check_rev: if ``True``, the ``"_rev"`` fields in **documents**
- are compared against the revisions of the target documents
- :type check_rev: bool
- :param sync: wait for the operation to sync to disk
- :type sync: bool
- :returns: the result of the replace (e.g. document keys, revisions)
- :rtype: dict
- :raises arango.exceptions.DocumentReplaceError: if the documents cannot
- be replaced
-
- .. note::
- The ``"_key"`` fields must be present in **documents**. For edge
- collections the ``"_from"`` and ``"_to"`` fields must also be
- present in **documents**.
-
- .. note::
- Arguments **return_new** and **return_old** have no effect in
- transactions.
-
- .. warning::
- The returned details (whose size scales with the number of target
- documents) are all brought into memory.
- """
- params = {
- 'returnNew': return_new,
- 'returnOld': return_old,
- 'ignoreRevs': not check_rev,
- 'overwrite': not check_rev
- }
- if sync is not None:
- params['waitForSync'] = sync
-
- if self._conn.type != 'transaction':
- command = None
- else:
- documents_str = dumps(documents)
- command = 'db.{}.replace({},{},{})'.format(
- self._name,
- documents_str,
- documents_str,
- dumps(params)
- )
-
- request = Request(
- method='put',
- endpoint='/_api/document/{}'.format(self._name),
- params=params,
- data=documents,
- command=command
- )
-
- def handler(res):
- if res.status_code not in HTTP_OK:
- raise DocumentReplaceError(res)
-
- results = []
- for result in res.body:
- # TODO this is not clean
- if '_id' not in result:
- # An error occurred with this particular document
- err = res.update_body(result)
- # Single out revision error
- if result['errorNum'] == 1200:
- result = DocumentRevisionError(err)
- else:
- result = DocumentReplaceError(err)
- else:
- if res.status_code == 202:
- result['sync'] = False
- else:
- result['sync'] = True
- result['_old_rev'] = result.pop('_oldRev')
- results.append(result)
-
- return results
-
- return request, handler
-
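``replace_many`` follows the same error-reporting convention as ``update_many``; a sketch:

    results = students.replace_many([
        {'_key': '235', 'name': 'Emma'},
        {'_key': '236', 'name': 'Noah'},
    ])
    errors = [r for r in results if isinstance(r, Exception)]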
- @api_method
- def replace_match(self, filters, body, limit=None, sync=None):
- """Replace matching documents in the collection.
-
- :param filters: the document filters
- :type filters: dict
- :param body: the document body
- :type body: dict
- :param limit: max number of documents to replace
- :type limit: int
- :param sync: wait for the operation to sync to disk
- :type sync: bool
- :returns: the number of documents replaced
- :rtype: int
- :raises arango.exceptions.DocumentReplaceError: if the documents
- cannot be replaced
- """
- data = {
- 'collection': self._name,
- 'example': filters,
- 'newValue': body
- }
- if limit is not None:
- data['limit'] = limit
- if sync is not None:
- data['waitForSync'] = sync
-
- if self._conn.type != 'transaction':
- command = None
- else:
- command = 'db.{}.replaceByExample({},{},{})'.format(
- self._name,
- dumps(filters),
- dumps(body),
- dumps(data)
- )
-
- request = Request(
- method='put',
- endpoint='/_api/simple/replace-by-example',
- data=data,
- command=command
- )
-
- def handler(res):
- if res.status_code not in HTTP_OK:
- raise DocumentReplaceError(res)
- return res.body['replaced']
-
- return request, handler
-
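A sketch of ``replace_match`` under the same illustrative assumptions:

    count = students.replace_match(
        filters={'year': 2017},
        body={'year': 2018, 'reviewed': False},
        limit=100
    )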
- @api_method
- def delete(self,
- document,
- ignore_missing=False,
- return_old=False,
- check_rev=False,
- sync=None):
- """Delete a document by its key.
-
- :param document: the document to delete or its key
- :type document: dict | str | unicode
- :param ignore_missing: ignore missing documents (default: ``False``)
- :type ignore_missing: bool
- :param return_old: if ``True``, the full body of the old document is
- included in the returned result (default: ``False``)
- :type return_old: bool
- :param check_rev: if ``True``, the ``"_rev"`` field in **document** is
- compared against the revision of the target document (this flag is
- only applicable when **document** is an actual document and not a
- document key)
- :type check_rev: bool
- :param sync: wait for the operation to sync to disk
- :type sync: bool
- :returns: the results of the delete (e.g. document key, new revision)
- or ``False`` if the document was missing but ignored
- :rtype: dict | bool
- :raises arango.exceptions.DocumentRevisionError: if the given revision
- does not match the revision of the target document
- :raises arango.exceptions.DocumentDeleteError: if the document cannot
- be deleted
-
- .. note::
- If **document** is a dictionary, it must have the ``"_key"`` field.
-
- .. note::
- Argument **return_old** has no effect in transactions.
- """
- params = {
- 'returnOld': return_old,
- 'ignoreRevs': not check_rev,
- 'overwrite': not check_rev
- }
- if sync is not None:
- params['waitForSync'] = sync
-
- full_doc = not isinstance(document, string_types)
- if check_rev and full_doc and '_rev' in document:
- headers = {'If-Match': document['_rev']}
- else:
- headers = {}
-
- if self._conn.type != 'transaction':
- command = None
- else:
- command = 'db.{}.remove({},{})'.format(
- self._name,
- dumps(document if full_doc else {'_key': document}),
- dumps(params)
- )
-
- request = Request(
- method='delete',
- endpoint='/_api/document/{}/{}'.format(
- self._name, document['_key'] if full_doc else document
- ),
- params=params,
- headers=headers,
- command=command
- )
-
- def handler(res):
- if res.status_code == 412:
- raise DocumentRevisionError(res)
- elif res.status_code == 404:
- if ignore_missing:
- return False
- raise DocumentDeleteError(res)
- elif res.status_code not in HTTP_OK:
- raise DocumentDeleteError(res)
- if res.status_code == 202:
- res.body['sync'] = False
- else:
- res.body['sync'] = True
- return res.body
-
- return request, handler
-
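``delete`` accepts either a full document (with ``"_key"``) or a bare key; a sketch:

    students.delete('235', ignore_missing=True)

    # With check_rev=True the "_rev" field is sent as an If-Match header.
    students.delete({'_key': '236', '_rev': '9998'}, check_rev=True)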
- @api_method
- def delete_many(self,
- documents,
- return_old=False,
- check_rev=False,
- sync=None):
- """Delete multiple documents from the collection.
-
- :param documents: the list of documents or keys to delete
- :type documents: list
- :param return_old: if ``True``, the full bodies of the old documents
- are included in the returned result
- :type return_old: bool
- :param check_rev: if ``True``, the ``"_rev"`` fields in **documents**
- are compared against the revisions of the target documents
- :type check_rev: bool
- :param sync: wait for the operation to sync to disk
- :type sync: bool
- :returns: the result of the delete (e.g. document keys, revisions)
- :rtype: dict
- :raises arango.exceptions.DocumentDeleteError: if the documents cannot
- be deleted
-
- .. note::
- If an entry in **documents** is a dictionary, it must have the
- ``"_key"`` field.
- """
- params = {
- 'returnOld': return_old,
- 'ignoreRevs': not check_rev,
- 'overwrite': not check_rev
- }
- if sync is not None:
- params['waitForSync'] = sync
-
- if self._conn.type != 'transaction':
- command = None
- else:
- command = 'db.{}.remove({},{})'.format(
- self._name,
- dumps(documents),
- dumps(params)
- )
-
- request = Request(
- method='delete',
- endpoint='/_api/document/{}'.format(self._name),
- params=params,
- data=documents,
- command=command
- )
-
- def handler(res):
- if res.status_code not in HTTP_OK:
- raise DocumentDeleteError(res)
-
- results = []
- for result in res.body:
- if '_id' not in result:
- # An error occurred with this particular document
- err = res.update_body(result)
- # Single out revision errors
- if result['errorNum'] == 1200:
- result = DocumentRevisionError(err)
- else:
- result = DocumentDeleteError(err)
- else:
- if res.status_code == 202:
- result['sync'] = False
- else:
- result['sync'] = True
- results.append(result)
-
- return results
-
- return request, handler
-
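``delete_many`` accepts a mixed list of keys and documents, with the same per-document error convention; a sketch:

    results = students.delete_many(['235', '236', {'_key': '237'}])
    errors = [r for r in results if isinstance(r, Exception)]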
- @api_method
- def delete_match(self, filters, limit=None, sync=None):
- """Delete matching documents from the collection.
-
- :param filters: the document filters
- :type filters: dict
- :param limit: the max number of documents to delete
- :type limit: int
- :param sync: wait for the operation to sync to disk
- :type sync: bool
- :returns: the number of documents deleted
- :rtype: int
- :raises arango.exceptions.DocumentDeleteError: if the documents
- cannot be deleted from the collection
- """
- data = {'collection': self._name, 'example': filters}
- if sync is not None:
- data['waitForSync'] = sync
- if limit is not None:
- data['limit'] = limit
-
- request = Request(
- method='put',
- endpoint='/_api/simple/remove-by-example',
- data=data,
- command='db.{}.removeByExample({}, {})'.format(
- self._name,
- dumps(filters),
- dumps(data)
- )
- )
-
- def handler(res):
- if res.status_code not in HTTP_OK:
- raise DocumentDeleteError(res)
- return res.body['deleted']
-
- return request, handler
-
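A sketch of ``delete_match``:

    count = students.delete_match(filters={'graduated': True}, limit=50)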
- @api_method
- def import_bulk(self,
- documents,
- halt_on_error=None,
- details=True,
- from_prefix=None,
- to_prefix=None,
- overwrite=None,
- on_duplicate=None,
- sync=None):
- """Insert multiple documents into the collection.
-
- This is faster than :func:`arango.collections.Collection.insert_many`
- but does not return as much information. Any ``"_id"`` and ``"_rev"``
- fields in **documents** are ignored.
-
- :param documents: the list of the new documents to insert in bulk
- :type documents: list
- :param halt_on_error: halt the entire import on an error
- (default: ``True``)
- :type halt_on_error: bool
- :param details: if ``True``, the returned result will include an
- additional list of detailed error messages (default: ``True``)
- :type details: bool
- :param from_prefix: the string prefix to prepend to the ``"_from"``
- field of each edge document inserted. *This only works for edge
- collections.*
- :type from_prefix: str | unicode
- :param to_prefix: the string prefix to prepend to the ``"_to"`` field
- of each edge document inserted. *This only works for edge
- collections.*
- :type to_prefix: str | unicode
- :param overwrite: if ``True``, all existing documents in the collection
- are removed prior to the import. Indexes are still preserved.
- :type overwrite: bool
- :param on_duplicate: the action to take on unique key constraint
- violations. Possible values are:
-
- .. code-block:: none
-
- "error" : do not import the new documents and count them as
- errors (this is the default)
-
- "update" : update the existing documents while preserving any
- fields missing in the new ones
-
- "replace" : replace the existing documents with the new ones
-
- "ignore" : do not import the new documents and count them as
- ignored, as opposed to counting them as errors
-
- :type on_duplicate: str | unicode
- :param sync: wait for the operation to sync to disk
- :type sync: bool
- :returns: the result of the bulk import
- :rtype: dict
- :raises arango.exceptions.DocumentInsertError: if the documents cannot
- be inserted into the collection
-
- .. note::
- Parameters **from_prefix** and **to_prefix** only work for edge
- collections. When the prefix is prepended, it is followed by a
- ``"/"`` character. For example, prefix ``"foo"`` prepended to an
- edge document with ``"_from": "bar"`` will result in a new value
- ``"_from": "foo/bar"``.
-
- .. note::
- Parameter **on_duplicate** actions ``"update"``, ``"replace"``
- and ``"ignore"`` will work only when **documents** contain the
- ``"_key"`` fields.
-
- .. warning::
- Parameter **on_duplicate** actions ``"update"`` and ``"replace"``
- may fail on secondary unique key constraint violations.
- """
- params = {
- 'type': 'array',
- 'collection': self._name
- }
- if halt_on_error is not None:
- params['complete'] = halt_on_error
- if details is not None:
- params['details'] = details
- if from_prefix is not None:
- params['fromPrefix'] = from_prefix
- if to_prefix is not None:
- params['toPrefix'] = to_prefix
- if overwrite is not None:
- params['overwrite'] = overwrite
- if on_duplicate is not None:
- params['onDuplicate'] = on_duplicate
- if sync is not None:
- params['waitForSync'] = sync
-
- request = Request(
- method='post',
- endpoint='/_api/import',
- data=documents,
- params=params
- )
-
- def handler(res):
- if res.status_code not in HTTP_OK:
- raise DocumentInsertError(res)
- return res.body
-
- return request, handler
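A sketch of ``import_bulk``; the result fields shown are those returned by the ``/_api/import`` endpoint:

    result = students.import_bulk(
        [{'_key': str(i), 'name': 'student{}'.format(i)} for i in range(1000)],
        on_duplicate='replace',  # replace existing documents on key conflicts
        details=True
    )
    print(result['created'], result['errors'])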
diff --git a/arango/collections/vertex.py b/arango/collections/vertex.py
deleted file mode 100644
index 07de8694..00000000
--- a/arango/collections/vertex.py
+++ /dev/null
@@ -1,273 +0,0 @@
-from __future__ import absolute_import, unicode_literals
-
-from arango.api import api_method
-from arango.collections.base import BaseCollection
-from arango.exceptions import *
-from arango.request import Request
-from arango.utils import HTTP_OK
-
-
-class VertexCollection(BaseCollection):
- """ArangoDB vertex collection.
-
- A vertex collection consists of vertex documents. It is uniquely identified
- by its name, which must consist only of alphanumeric characters, hyphens
- and underscores. Vertex collections share their namespace with
- other types of collections.
-
- The documents in a vertex collection are fully accessible from a standard
- collection. Managing documents through a vertex collection, however, adds
- additional guarantees: all modifications are executed in transactions, and
- if a vertex is deleted, all connected edges are also deleted.
-
- :param connection: ArangoDB database connection
- :type connection: arango.connection.Connection
- :param graph_name: the name of the graph
- :type graph_name: str | unicode
- :param name: the name of the vertex collection
- :type name: str | unicode
- """
-
- def __init__(self, connection, graph_name, name):
- super(VertexCollection, self).__init__(connection, name)
- self._graph_name = graph_name
-
- def __repr__(self):
- return (
- '<ArangoDB vertex collection "{}" in graph "{}">'
- .format(self._name, self._graph_name)
- )
-
- @property
- def graph_name(self):
- """Return the name of the graph.
-
- :returns: the name of the graph
- :rtype: str | unicode
- """
- return self._graph_name
-
- @api_method
- def get(self, key, rev=None):
- """Fetch a document by key from the vertex collection.
-
- :param key: the document key
- :type key: str | unicode
- :param rev: the document revision to be compared against the revision
- of the target document
- :type rev: str | unicode | None
- :returns: the vertex document or None if not found
- :rtype: dict | None
- :raises arango.exceptions.DocumentRevisionError: if the given revision
- does not match the revision of the target document
- :raises arango.exceptions.DocumentGetError: if the document cannot
- be fetched from the collection
- """
- request = Request(
- method='get',
- endpoint='/_api/gharial/{}/vertex/{}/{}'.format(
- self._graph_name, self._name, key
- ),
- headers={'If-Match': rev} if rev else {}
- )
-
- def handler(res):
- if res.status_code == 412:
- raise DocumentRevisionError(res)
- elif res.status_code == 404 and res.error_code == 1202:
- return None
- elif res.status_code not in HTTP_OK:
- raise DocumentGetError(res)
- return res.body['vertex']
-
- return request, handler
-
- @api_method
- def insert(self, document, sync=None):
- """Insert a new document into the vertex collection.
-
- If the ``"_key"`` field is present in **document**, its value is used
- as the key of the new document. Otherwise, the key is auto-generated.
-
- :param document: the document body
- :type document: dict
- :param sync: wait for the operation to sync to disk
- :type sync: bool | None
- :returns: the ID, revision and key of the document
- :rtype: dict
- :raises arango.exceptions.DocumentInsertError: if the document cannot
- be inserted into the collection
- """
- params = {}
- if sync is not None:
- params['waitForSync'] = sync
-
- request = Request(
- method='post',
- endpoint='/_api/gharial/{}/vertex/{}'.format(
- self._graph_name, self._name
- ),
- data=document,
- params=params
- )
-
- def handler(res):
- if res.status_code not in HTTP_OK:
- raise DocumentInsertError(res)
- return res.body['vertex']
-
- return request, handler
-
- @api_method
- def update(self, document, keep_none=True, sync=None):
- """Update a document by its key in the vertex collection.
-
- The ``"_key"`` field must be present in **document**. If the ``"_rev"``
- field is present in **document**, its value is compared against the
- revision of the target document.
-
- :param document: the partial/full document with the updated values
- :type document: dict
- :param keep_none: if ``True``, the fields with value ``None`` are
- retained in the document, otherwise the fields are removed from
- the document completely
- :type keep_none: bool
- :param sync: wait for the operation to sync to disk
- :type sync: bool | None
- :returns: the ID, revision and key of the updated document
- :rtype: dict
- :raises arango.exceptions.DocumentRevisionError: if the given revision
- does not match the revision of the target document
- :raises arango.exceptions.DocumentUpdateError: if the document cannot
- be updated
- """
- params = {'keepNull': keep_none}
- if sync is not None:
- params['waitForSync'] = sync
-
- headers = {}
- revision = document.get('_rev')
- if revision is not None:
- headers['If-Match'] = revision
-
- request = Request(
- method='patch',
- endpoint='/_api/gharial/{}/vertex/{}/{}'.format(
- self._graph_name, self._name, document['_key']
- ),
- data=document,
- params=params,
- headers=headers
- )
-
- def handler(res):
- if res.status_code == 412:
- raise DocumentRevisionError(res)
- elif res.status_code not in HTTP_OK:
- raise DocumentUpdateError(res)
- vertex = res.body['vertex']
- vertex['_old_rev'] = vertex.pop('_oldRev')
- return vertex
-
- return request, handler
-
- @api_method
- def replace(self, document, sync=None):
- """Replace a document by its key in the vertex collection.
-
- The ``"_key"`` field must be present in **document**.
- If the ``"_rev"`` field is present in **document**, its value is
- compared against the revision of the target document.
-
- :param document: the new document
- :type document: dict
- :param sync: wait for operation to sync to disk
- :type sync: bool | None
- :returns: the ID, revision and key of the replaced document
- :rtype: dict
- :raises arango.exceptions.DocumentRevisionError: if the given revision
- does not match the revision of the target document
- :raises arango.exceptions.DocumentReplaceError: if the document cannot
- be replaced
- """
- params = {}
- if sync is not None:
- params['waitForSync'] = sync
-
- headers = {}
- revision = document.get('_rev')
- if revision is not None:
- headers['If-Match'] = revision
-
- request = Request(
- method='put',
- endpoint='/_api/gharial/{}/vertex/{}/{}'.format(
- self._graph_name, self._name, document['_key']
- ),
- params=params,
- data=document,
- headers=headers
- )
-
- def handler(res):
- if res.status_code == 412:
- raise DocumentRevisionError(res)
- elif res.status_code not in HTTP_OK:
- raise DocumentReplaceError(res)
- vertex = res.body['vertex']
- vertex['_old_rev'] = vertex.pop('_oldRev')
- return vertex
-
- return request, handler
-
- @api_method
- def delete(self, document, ignore_missing=False, sync=None):
- """Delete a document by its key from the vertex collection.
-
- The ``"_key"`` field must be present in **document**. If the ``"_rev"``
- field is present in **document**, its value is compared against the
- revision of the target document.
-
- :param document: the document to delete
- :type document: dict
- :param sync: wait for the operation to sync to disk
- :type sync: bool | None
- :param ignore_missing: ignore missing documents
- :type ignore_missing: bool
- :returns: whether the document was deleted successfully
- :rtype: bool
- :raises arango.exceptions.DocumentRevisionError: if the given revision
- does not match the revision of the target document
- :raises arango.exceptions.DocumentDeleteError: if the document cannot
- be deleted from the collection
- """
- params = {}
- if sync is not None:
- params['waitForSync'] = sync
-
- headers = {}
- revision = document.get('_rev')
- if revision is not None:
- headers['If-Match'] = revision
-
- request = Request(
- method='delete',
- endpoint='/_api/gharial/{}/vertex/{}/{}'.format(
- self._graph_name, self._name, document['_key']
- ),
- params=params,
- headers=headers
- )
-
- def handler(res):
- if res.status_code == 412:
- raise DocumentRevisionError(res)
- elif res.status_code == 404 and res.error_code == 1202:
- if ignore_missing:
- return False
- raise DocumentDeleteError(res)
- if res.status_code not in HTTP_OK:
- raise DocumentDeleteError(res)
- return res.body['removed']
-
- return request, handler
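A sketch of the removed ``VertexCollection`` API, assuming a graph named "school" with a vertex collection "teachers" (names are illustrative):

    teachers = db.graph('school').vertex_collection('teachers')
    teachers.insert({'_key': 'jon', 'dept': 'CS'})
    teachers.update({'_key': 'jon', 'dept': 'Math'})
    assert teachers.get('jon')['dept'] == 'Math'
    teachers.delete({'_key': 'jon'})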
diff --git a/arango/connection.py b/arango/connection.py
index 73798644..2464987d 100644
--- a/arango/connection.py
+++ b/arango/connection.py
@@ -1,327 +1,72 @@
from __future__ import absolute_import, unicode_literals
-import logging
+from arango.http import DefaultHTTPClient
-from arango.http_clients import DefaultHTTPClient
-from arango.utils import sanitize
+__all__ = ['Connection']
class Connection(object):
- """ArangoDB database connection.
+ """HTTP connection to specific ArangoDB database.
- :param protocol: the internet transfer protocol (default: ``"http"``)
- :type protocol: str | unicode
- :param host: ArangoDB host (default: ``"localhost"``)
- :type host: str | unicode
- :param port: ArangoDB port (default: ``8529``)
- :type port: int | str | unicode
- :param database: the name of the target database (default: ``"_system"``)
- :type database: str | unicode
- :param username: ArangoDB username (default: ``"root"``)
+ :param url: ArangoDB base URL.
+ :type url: str | unicode
+ :param db: Database name.
+ :type db: str | unicode
+ :param username: Username.
:type username: str | unicode
- :param password: ArangoDB password (default: ``""``)
+ :param password: Password.
:type password: str | unicode
- :param http_client: the HTTP client
- :type http_client: arango.clients.base.BaseHTTPClient
- :param enable_logging: log all API requests with a logger named "arango"
- :type enable_logging: bool
+ :param http_client: User-defined HTTP client.
+ :type http_client: arango.http.HTTPClient
"""
- def __init__(self,
- protocol='http',
- host='localhost',
- port=8529,
- database='_system',
- username='root',
- password='',
- http_client=None,
- enable_logging=True,
- logger=None):
-
- self._protocol = protocol.strip('/')
- self._host = host.strip('/')
- self._port = port
- self._database = database or '_system'
- self._url_prefix = '{protocol}://{host}:{port}/_db/{db}'.format(
- protocol=self._protocol,
- host=self._host,
- port=self._port,
- db=self._database
- )
+ def __init__(self, url, db, username, password, http_client):
+ self._url_prefix = '{}/_db/{}'.format(url, db)
+ self._db_name = db
self._username = username
- self._password = password
- self._http = http_client or DefaultHTTPClient()
- self._enable_logging = enable_logging
- self._type = 'standard'
- self._logger = logger or logging.getLogger('arango')
-
- def __repr__(self):
- return '<ArangoDB connection to database "{}">'.format(self._database)
-
- @property
- def protocol(self):
- """Return the internet transfer protocol.
-
- :returns: the internet transfer protocol
- :rtype: str | unicode
- """
- return self._protocol
+ self._auth = (username, password)
+ self._http_client = http_client or DefaultHTTPClient()
@property
- def host(self):
- """Return the ArangoDB host.
+ def url_prefix(self):
+ """Return the ArangoDB URL prefix (base URL + database name).
- :returns: the ArangoDB host
+ :returns: ArangoDB URL prefix.
:rtype: str | unicode
"""
- return self._host
-
- @property
- def port(self):
- """Return the ArangoDB port.
-
- :returns: the ArangoDB port
- :rtype: int
- """
- return self._port
+ return self._url_prefix
@property
def username(self):
- """Return the ArangoDB username.
+ """Return the username.
- :returns: the ArangoDB username
+ :returns: Username.
:rtype: str | unicode
"""
return self._username
@property
- def password(self):
- """Return the ArangoDB user password.
-
- :returns: the ArangoDB user password
- :rtype: str | unicode
- """
- return self._password
-
- @property
- def database(self):
- """Return the name of the connected database.
-
- :returns: the name of the connected database
- :rtype: str | unicode
- """
- return self._database
-
- @property
- def http_client(self):
- """Return the HTTP client in use.
-
- :returns: the HTTP client in use
- :rtype: arango.http_clients.base.BaseHTTPClient
- """
- return self._http
-
- @property
- def logging_enabled(self):
- """Return ``True`` if logging is enabled, ``False`` otherwise.
-
- :returns: whether logging is enabled or not
- :rtype: bool
- """
- return self._enable_logging
-
- @property
- def has_logging(self): # pragma: no cover
- """Return ``True`` if logging is enabled, ``False`` otherwise.
-
- :returns: whether logging is enabled or not
- :rtype: bool
-
- .. warning::
- This property will be deprecated in the future.
- Use **logging_enabled** instead.
- """
- return self._enable_logging
-
- @property
- def type(self):
- """Return the connection type.
+ def db_name(self):
+ """Return the database name.
- :return: the connection type
+ :returns: Database name.
:rtype: str | unicode
"""
- return self._type
-
- def handle_request(self, request, handler):
- # from arango.async import AsyncExecution
- # from arango.exceptions import ArangoError
- # async = AsyncExecution(self, return_result=True)
- # response = async.handle_request(request, handler)
- # while response.status() != 'done':
- # pass
- # result = response.result()
- # if isinstance(result, ArangoError):
- # raise result
- # return result
-
- # from arango.batch import BatchExecution
- # from arango.exceptions import ArangoError
- #
- # batch = BatchExecution(self, return_result=True)
- # response = batch.handle_request(request, handler)
- # batch.commit()
- # result = response.result()
- # if isinstance(result, ArangoError):
- # raise result
- # return result
- return handler(getattr(self, request.method)(**request.kwargs))
-
- def head(self, endpoint, params=None, headers=None, **_):
- """Execute a **HEAD** API method.
-
- :param endpoint: the API endpoint
- :type endpoint: str | unicode
- :param params: the request parameters
- :type params: dict
- :param headers: the request headers
- :type headers: dict
- :returns: the ArangoDB http response
- :rtype: arango.response.Response
- """
- url = self._url_prefix + endpoint
- res = self._http.head(
- url=url,
- params=params,
- headers=headers,
- auth=(self._username, self._password)
- )
- if self._enable_logging:
- self._logger.debug('HEAD {} {}'.format(url, res.status_code))
- return res
-
- def get(self, endpoint, params=None, headers=None, **_):
- """Execute a **GET** API method.
-
- :param endpoint: the API endpoint
- :type endpoint: str | unicode
- :param params: the request parameters
- :type params: dict
- :param headers: the request headers
- :type headers: dict
- :returns: the ArangoDB http response
- :rtype: arango.response.Response
- """
- url = self._url_prefix + endpoint
- res = self._http.get(
- url=url,
- params=params,
- headers=headers,
- auth=(self._username, self._password)
- )
- if self._enable_logging:
- self._logger.debug('GET {} {}'.format(url, res.status_code))
- return res
-
- def put(self, endpoint, data=None, params=None, headers=None, **_):
- """Execute a **PUT** API method.
-
- :param endpoint: the API endpoint
- :type endpoint: str | unicode
- :param data: the request payload
- :type data: str | unicode | dict
- :param params: the request parameters
- :type params: dict
- :param headers: the request headers
- :type headers: dict
- :returns: the ArangoDB http response
- :rtype: arango.response.Response
- """
- url = self._url_prefix + endpoint
- res = self._http.put(
- url=url,
- data=sanitize(data),
- params=params,
- headers=headers,
- auth=(self._username, self._password)
- )
- if self._enable_logging:
- self._logger.debug('PUT {} {}'.format(url, res.status_code))
- return res
-
- def post(self, endpoint, data=None, params=None, headers=None, **_):
- """Execute a **POST** API method.
-
- :param endpoint: the API endpoint
- :type endpoint: str | unicode
- :param data: the request payload
- :type data: str | unicode | dict
- :param params: the request parameters
- :type params: dict
- :param headers: the request headers
- :type headers: dict
- :returns: the ArangoDB http response
- :rtype: arango.response.Response
- """
- url = self._url_prefix + endpoint
- res = self._http.post(
- url=url,
- data=sanitize(data),
- params=params,
- headers=headers,
- auth=(self._username, self._password)
- )
- if self._enable_logging:
- self._logger.debug('POST {} {}'.format(url, res.status_code))
- return res
-
- def patch(self, endpoint, data=None, params=None, headers=None, **_):
- """Execute a **PATCH** API method.
-
- :param endpoint: the API endpoint
- :type endpoint: str | unicode
- :param data: the request payload
- :type data: str | unicode | dict
- :param params: the request parameters
- :type params: dict
- :param headers: the request headers
- :type headers: dict
- :returns: the ArangoDB http response
- :rtype: arango.response.Response
- """
- url = self._url_prefix + endpoint
- res = self._http.patch(
- url=url,
- data=sanitize(data),
- params=params,
- headers=headers,
- auth=(self._username, self._password)
- )
- if self._enable_logging:
- self._logger.debug('PATCH {} {}'.format(url, res.status_code))
- return res
+ return self._db_name
- def delete(self, endpoint, data=None, params=None, headers=None, **_):
- """Execute a **DELETE** API method.
+ def send_request(self, request):
+ """Send an HTTP request to ArangoDB server.
- :param endpoint: the API endpoint
- :type endpoint: str | unicode
- :param data: the request payload
- :type data: str | unicode | dict
- :param params: the request parameters
- :type params: dict
- :param headers: the request headers
- :type headers: dict
- :returns: the ArangoDB http response
+ :param request: HTTP request.
+ :type request: arango.request.Request
+ :return: HTTP response.
:rtype: arango.response.Response
"""
- url = self._url_prefix + endpoint
- res = self._http.delete(
- url=url,
- data=sanitize(data),
- params=params,
- headers=headers,
- auth=(self._username, self._password)
+ return self._http_client.send_request(
+ method=request.method,
+ url=self._url_prefix + request.endpoint,
+ params=request.params,
+ data=request.data,
+ headers=request.headers,
+ auth=self._auth,
)
- if self._enable_logging:
- self._logger.debug('DELETE {} {}'.format(url, res.status_code))
- return res
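The slimmed-down ``Connection`` is now a thin transport layer; a sketch of direct use (in practice it is constructed internally by the client):

    from arango.connection import Connection
    from arango.request import Request

    conn = Connection(
        url='http://localhost:8529',
        db='test',
        username='root',
        password='passwd',
        http_client=None  # falls back to DefaultHTTPClient
    )
    resp = conn.send_request(Request(method='get', endpoint='/_api/version'))
    print(resp.status_code)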
diff --git a/arango/cursor.py b/arango/cursor.py
index 9119833f..1555bc61 100644
--- a/arango/cursor.py
+++ b/arango/cursor.py
@@ -1,29 +1,72 @@
from __future__ import absolute_import, unicode_literals
-from arango.utils import HTTP_OK
+__all__ = ['Cursor']
+
+from collections import deque
+
from arango.exceptions import (
CursorNextError,
CursorCloseError,
+ CursorStateError,
+ CursorEmptyError
)
+from arango.request import Request
class Cursor(object):
- """ArangoDB cursor which returns documents from the server in batches.
+ """Cursor API wrapper.
- :param connection: ArangoDB database connection
- :type connection: arango.connection.Connection
- :param init_data: the cursor initialization data
- :type init_data: dict
- :raises CursorNextError: if the next batch cannot be retrieved
- :raises CursorCloseError: if the cursor cannot be closed
+ Cursors fetch query results from ArangoDB server in batches. Cursor objects
+ are *stateful* as they store the fetched items in-memory. They must not be
+ shared across threads without a proper locking mechanism.
- .. note::
- This class is designed to be instantiated internally only.
+ In transactions, the entire result set is loaded into the cursor. Therefore
+ you must be mindful of client-side memory capacity when running queries
+ that can potentially return a large result set.
+
+ :param connection: HTTP connection.
+ :type connection: arango.connection.Connection
+ :param init_data: Cursor initialization data.
+ :type init_data: dict | list
+ :param cursor_type: Cursor type ("cursor" or "export").
+ :type cursor_type: str | unicode
"""
- def __init__(self, connection, init_data):
+ __slots__ = [
+ '_conn',
+ '_type',
+ '_id',
+ '_count',
+ '_cached',
+ '_stats',
+ '_profile',
+ '_warnings',
+ '_has_more',
+ '_batch'
+ ]
+
+ def __init__(self, connection, init_data, cursor_type='cursor'):
self._conn = connection
- self._data = init_data
+ self._type = cursor_type
+ self._batch = deque()
+ self._id = None
+ self._count = None
+ self._cached = None
+ self._stats = None
+ self._profile = None
+ self._warnings = None
+
+ if isinstance(init_data, list):
+ # In transactions, cursor initialization data is a list containing
+ # the entire result set.
+ self._has_more = False
+ self._batch.extend(init_data)
+ self._count = len(init_data)
+ else:
+ # In other execution contexts, cursor initialization data is a dict
+ # containing cursor metadata (e.g. ID, parameters).
+ self._update(init_data)
def __iter__(self):
return self
@@ -34,155 +77,226 @@ def __next__(self):
def __enter__(self):
return self
+ def __len__(self):
+ return self._count
+
def __exit__(self, *_):
self.close(ignore_missing=True)
def __repr__(self):
- if self.id is None:
- return '<ArangoDB cursor>'
- return '<ArangoDB cursor {}>'.format(self.id)
+ return '<Cursor {}>'.format(self._id) if self._id else '<Cursor>'
+
+ def _update(self, data):
+ """Update the cursor using data from ArangoDB server.
+
+ :param data: Cursor data from ArangoDB server (e.g. results).
+ :type data: dict
+ """
+ result = {}
+
+ if 'id' in data:
+ self._id = data['id']
+ result['id'] = data['id']
+ if 'count' in data:
+ self._count = data['count']
+ result['count'] = data['count']
+ if 'cached' in data:
+ self._cached = data['cached']
+ result['cached'] = data['cached']
+
+ self._has_more = data['hasMore']
+ result['has_more'] = data['hasMore']
+
+ self._batch.extend(data['result'])
+ result['batch'] = data['result']
+
+ if 'extra' in data:
+ extra = data['extra']
+
+ if 'profile' in extra:
+ self._profile = extra['profile']
+ result['profile'] = extra['profile']
+
+ if 'warnings' in extra:
+ self._warnings = extra['warnings']
+ result['warnings'] = extra['warnings']
+
+ if 'stats' in extra:
+ stats = extra['stats']
+ if 'writesExecuted' in stats:
+ stats['modified'] = stats.pop('writesExecuted')
+ if 'writesIgnored' in stats:
+ stats['ignored'] = stats.pop('writesIgnored')
+ if 'scannedFull' in stats:
+ stats['scanned_full'] = stats.pop('scannedFull')
+ if 'scannedIndex' in stats:
+ stats['scanned_index'] = stats.pop('scannedIndex')
+ if 'executionTime' in stats:
+ stats['execution_time'] = stats.pop('executionTime')
+ if 'httpRequests' in stats:
+ stats['http_requests'] = stats.pop('httpRequests')
+ self._stats = stats
+ result['statistics'] = stats
+
+ return result
@property
def id(self):
"""Return the cursor ID.
- :returns: the cursor ID
- :rtype: str
+ :return: Cursor ID.
+ :rtype: str | unicode
"""
- return self._data.get('id')
+ return self._id
+
+ @property
+ def type(self):
+ """Return the cursor type.
+
+ :return: Cursor type ("cursor" or "export").
+ :rtype: str | unicode
+ """
+ return self._type
def batch(self):
- """Return the current batch of documents.
+ """Return the current batch of results.
- :returns: the current batch of documents
- :rtype: list
+ :return: Current batch.
+ :rtype: collections.deque
"""
- return self._data['result']
+ return self._batch
def has_more(self):
- """Indicates whether more results are available.
+ """Return True if more results are available on the server.
- :returns: whether more results are available
+ :return: True if more results are available on the server.
:rtype: bool
"""
- return self._data['hasMore']
+ return self._has_more
def count(self):
- """Return the total number of documents in the results.
+ """Return the total number of documents in the entire result set.
- .. note::
- If the cursor was not initialized with the count option enabled,
- None is returned instead.
-
- :returns: the total number of results
- :rtype: int
+ :return: Total number of documents, or None if the count option
+ was not enabled during cursor initialization.
+ :rtype: int | None
"""
- return self._data.get('count')
+ return self._count
def cached(self):
- """Return whether the result is cached or not.
+ """Return True if results are cached.
- :return: whether the result is cached or not
+ :return: True if results are cached.
:rtype: bool
"""
- return self._data.get('cached')
+ return self._cached
def statistics(self):
- """Return any available cursor stats.
+ """Return cursor statistics.
- :return: the cursor stats
+ :return: Cursor statistics.
:rtype: dict
"""
- if 'extra' in self._data and 'stats' in self._data['extra']:
- stats = dict(self._data['extra']['stats'])
- stats['modified'] = stats.pop('writesExecuted', None)
- stats['ignored'] = stats.pop('writesIgnored', None)
- stats['scanned_full'] = stats.pop('scannedFull', None)
- stats['scanned_index'] = stats.pop('scannedIndex', None)
- stats['execution_time'] = stats.pop('executionTime', None)
- return stats
+ return self._stats
+
+ def profile(self):
+ """Return cursor performance profile.
+
+ :return: Cursor performance profile.
+ :rtype: dict
+ """
+ return self._profile
def warnings(self):
- """Return any warnings (e.g. from the query execution).
+ """Return any warnings from the query execution.
- :returns: the warnings
+ :return: Warnings, or None if there are none.
:rtype: list
"""
- if 'extra' in self._data and 'warnings' in self._data['extra']:
- return self._data['extra']['warnings']
+ return self._warnings
- def next(self):
- """Read the next result from the cursor.
+ def empty(self):
+ """Check if the current batch is empty.
- :returns: the next item in the cursor
- :rtype: dict
- :raises: StopIteration, CursorNextError
- """
- if not self.batch() and self.has_more():
- res = self._conn.put("/_api/cursor/{}".format(self.id))
- if res.status_code not in HTTP_OK:
- raise CursorNextError(res)
- self._data = res.body
- elif not self.batch() and not self.has_more():
- raise StopIteration
- return self.batch().pop(0)
-
- def close(self, ignore_missing=True):
- """Close the cursor and free the resources tied to it.
-
- :returns: whether the cursor was closed successfully
+ :return: True if current batch is empty, False otherwise.
:rtype: bool
- :param ignore_missing: ignore missing cursors
- :type ignore_missing: bool
- :raises: CursorCloseError
"""
- if not self.id:
- return False
- res = self._conn.delete("/_api/cursor/{}".format(self.id))
- if res.status_code not in HTTP_OK:
- if res.status_code == 404 and ignore_missing:
- return False
- raise CursorCloseError(res)
- return True
+ return len(self._batch) == 0
+ def next(self):
+ """Pop the next item from the current batch.
-class ExportCursor(Cursor): # pragma: no cover
- """ArangoDB cursor for export queries only.
+ If current batch is empty/depleted, an API request is automatically
+ sent to ArangoDB server to fetch the next batch and update the cursor.
- .. note::
- This class is designed to be instantiated internally only.
- """
+ :return: Next item in current batch.
+ :rtype: str | unicode | bool | int | list | dict
+ :raise StopIteration: If the result set is depleted.
+ :raise arango.exceptions.CursorNextError: If batch retrieval fails.
+ :raise arango.exceptions.CursorStateError: If cursor ID is not set.
+ """
+ if self.empty():
+ if not self.has_more():
+ raise StopIteration
+ self.fetch()
- def next(self):
- """Read the next result from the cursor.
+ return self.pop()
+
+ def pop(self):
+ """Pop the next item from current batch.
+
+ If current batch is empty/depleted, an exception is raised. You must
+ call :func:`arango.cursor.Cursor.fetch` to manually fetch the next
+ batch from server.
+
+ :return: Next item in current batch.
+ :rtype: str | unicode | bool | int | list | dict
+ :raise arango.exceptions.CursorEmptyError: If current batch is empty.
+ """
+ if len(self._batch) == 0:
+ raise CursorEmptyError('current batch is empty')
+ return self._batch.popleft()
+
+ def fetch(self):
+ """Fetch the next batch from server and update the cursor.
- :returns: the next item in the cursor
+ :return: New batch details.
:rtype: dict
- :raises: StopIteration, CursorNextError
+ :raise arango.exceptions.CursorNextError: If batch retrieval fails.
+ :raise arango.exceptions.CursorStateError: If cursor ID is not set.
"""
- if not self.batch() and self.has_more():
- res = self._conn.put("/_api/export/{}".format(self.id))
- if res.status_code not in HTTP_OK:
- raise CursorNextError(res)
- self._data = res.body
- elif not self.batch() and not self.has_more():
- raise StopIteration
- return self.batch().pop(0)
-
- def close(self, ignore_missing=True):
- """Close the cursor and free the resources tied to it.
-
- :returns: whether the cursor was closed successfully
- :rtype: bool
- :param ignore_missing: ignore missing cursors
+ if self._id is None:
+ raise CursorStateError('cursor ID not set')
+ request = Request(
+ method='put',
+ endpoint='/_api/{}/{}'.format(self._type, self._id)
+ )
+ resp = self._conn.send_request(request)
+
+ if not resp.is_success:
+ raise CursorNextError(resp, request)
+ return self._update(resp.body)
+
+ def close(self, ignore_missing=False):
+ """Close the cursor and free any server resources tied to it.
+
+ :param ignore_missing: Do not raise exception on missing cursors.
:type ignore_missing: bool
- :raises: CursorCloseError
+ :return: True if cursor was closed successfully, False if cursor was
+ not found and **ignore_missing** was set to True.
+ :rtype: bool
+ :raise arango.exceptions.CursorCloseError: If operation fails.
+ :raise arango.exceptions.CursorStateError: If cursor ID is not set.
"""
- if not self.id:
+ if self._id is None:
+ raise CursorStateError('cursor ID not set')
+ request = Request(
+ method='delete',
+ endpoint='/_api/{}/{}'.format(self._type, self._id)
+ )
+ resp = self._conn.send_request(request)
+ if resp.is_success:
+ return True
+ if resp.status_code == 404 and ignore_missing:
return False
- res = self._conn.delete("/_api/export/{}".format(self.id))
- if res.status_code not in HTTP_OK:
- if res.status_code == 404 and ignore_missing:
- return False
- raise CursorCloseError(res)
- return True
+ raise CursorCloseError(resp, request)
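A sketch of typical cursor usage, assuming the 4.x AQL wrapper, whose ``execute`` returns ``Cursor`` objects:

    cursor = db.aql.execute('FOR s IN students RETURN s', batch_size=10)
    try:
        for student in cursor:  # next() fetches new batches on demand
            print(student['_key'])
    finally:
        cursor.close(ignore_missing=True)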
diff --git a/arango/database.py b/arango/database.py
index 3d2828a7..a0cd5948 100644
--- a/arango/database.py
+++ b/arango/database.py
@@ -1,260 +1,507 @@
from __future__ import absolute_import, unicode_literals
-from datetime import datetime
+from arango.utils import get_col_name
-from requests import ConnectionError
+__all__ = [
+ 'StandardDatabase',
+ 'AsyncDatabase',
+ 'BatchDatabase',
+ 'TransactionDatabase'
+]
-from arango.async import AsyncExecution
-from arango.batch import BatchExecution
-from arango.cluster import ClusterTest
-from arango.collections import Collection
-from arango.utils import HTTP_OK
-from arango.exceptions import *
-from arango.graph import Graph
-from arango.transaction import Transaction
+from datetime import datetime
+
+from arango.api import APIWrapper
from arango.aql import AQL
-from arango.wal import WriteAheadLog
+from arango.executor import (
+ DefaultExecutor,
+ AsyncExecutor,
+ BatchExecutor,
+ TransactionExecutor,
+)
+from arango.collection import StandardCollection
+from arango.exceptions import (
+ AsyncJobClearError,
+ AsyncJobListError,
+ CollectionCreateError,
+ CollectionDeleteError,
+ CollectionListError,
+ DatabaseDeleteError,
+ DatabaseCreateError,
+ DatabaseListError,
+ DatabasePropertiesError,
+ GraphListError,
+ GraphCreateError,
+ GraphDeleteError,
+ PermissionListError,
+ PermissionGetError,
+ PermissionResetError,
+ PermissionUpdateError,
+ ServerConnectionError,
+ ServerEndpointsError,
+ ServerEngineError,
+ ServerDetailsError,
+ ServerEchoError,
+ ServerLogLevelError,
+ ServerLogLevelSetError,
+ ServerReadLogError,
+ ServerReloadRoutingError,
+ ServerRequiredDBVersionError,
+ ServerRoleError,
+ ServerRunTestsError,
+ ServerShutdownError,
+ ServerStatisticsError,
+ ServerTimeError,
+ ServerVersionError,
+ TaskCreateError,
+ TaskDeleteError,
+ TaskGetError,
+ TaskListError,
+ TransactionExecuteError,
+ UserCreateError,
+ UserDeleteError,
+ UserGetError,
+ UserListError,
+ UserReplaceError,
+ UserUpdateError,
+)
+from arango.foxx import Foxx
+from arango.graph import Graph
+from arango.pregel import Pregel
+from arango.request import Request
+from arango.wal import WAL
-class Database(object):
- """ArangoDB database.
+class Database(APIWrapper):
+ """Base class for Database API wrappers.
- :param connection: ArangoDB database connection
+ :param connection: HTTP connection.
:type connection: arango.connection.Connection
-
+ :param executor: API executor.
+ :type executor: arango.executor.Executor
"""
- def __init__(self, connection):
- self._conn = connection
- self._aql = AQL(self._conn)
- self._wal = WriteAheadLog(self._conn)
-
- def __repr__(self):
- return '<ArangoDB database "{}">'.format(self._conn.database)
+ def __init__(self, connection, executor):
+ super(Database, self).__init__(connection, executor)
def __getitem__(self, name):
+ """Return the collection API wrapper.
+
+ :param name: Collection name.
+ :type name: str | unicode
+ :return: Collection API wrapper.
+ :rtype: arango.collection.StandardCollection
+ """
return self.collection(name)
- @property
- def connection(self):
- """Return the connection object.
+ def _get_col_by_doc(self, document):
+ """Return the collection of the given document.
- :return: the database connection object
- :rtype: arango.connection.Connection
+ :param document: Document ID or body with "_id" field.
+ :type document: str | unicode | dict
+ :return: Collection API wrapper.
+ :rtype: arango.collection.StandardCollection
+ :raise arango.exceptions.DocumentParseError: On malformed document.
"""
- return self._conn
+ return self.collection(get_col_name(document))
@property
def name(self):
- """Return the name of the database.
+ """Return database name.
- :returns: the name of the database
+ :return: Database name.
:rtype: str | unicode
"""
- return self._conn.database
+ return self.db_name
@property
def aql(self):
- """Return the AQL object used to execute AQL statements.
+ """Return AQL (ArangoDB Query Language) API wrapper.
- Refer to :class:`arango.query.Query` for more information.
-
- :returns: the AQL object
- :rtype: arango.query.AQL
+ :return: AQL API wrapper.
+ :rtype: arango.aql.AQL
"""
- return self._aql
+ return AQL(self._conn, self._executor)
@property
def wal(self):
- """Return the write-ahead log object.
+ """Return WAL (Write-Ahead Log) API wrapper.
- :returns: the write-ahead log object
- :rtype: arango.wal.WriteAheadLog
+ :return: WAL API wrapper.
+ :rtype: arango.wal.WAL
"""
- return self._wal
+ return WAL(self._conn, self._executor)
- def verify(self):
- """Verify the connection to ArangoDB server.
+ @property
+ def foxx(self):
+ """Return Foxx API wrapper.
- :returns: ``True`` if the connection is successful
- :rtype: bool
- :raises arango.exceptions.ServerConnectionError: if the connection to
- the ArangoDB server fails
+ :return: Foxx API wrapper.
+ :rtype: arango.foxx.Foxx
+ """
+ return Foxx(self._conn, self._executor)
+
+ @property
+ def pregel(self):
+ """Return Pregel API wrapper.
+
+ :return: Pregel API wrapper.
+ :rtype: arango.pregel.Pregel
"""
- res = self._conn.head('/_api/version')
- if res.status_code not in HTTP_OK:
- raise ServerConnectionError(res)
- return True
+ return Pregel(self._conn, self._executor)
+
+ def properties(self):
+ """Return database properties.
+
+ :return: Database properties.
+ :rtype: dict
+ :raise arango.exceptions.DatabasePropertiesError: If retrieval fails.
+ """
+ request = Request(
+ method='get',
+ endpoint='/_api/database/current',
+ )
+
+ def response_handler(resp):
+ if not resp.is_success:
+ raise DatabasePropertiesError(resp, request)
+ result = resp.body['result']
+ result['system'] = result.pop('isSystem')
+ return result
+
+ return self._execute(request, response_handler)
+
+ def execute_transaction(self,
+ command,
+ params=None,
+ read=None,
+ write=None,
+ sync=None,
+ timeout=None,
+ max_size=None,
+ allow_implicit=None,
+ intermediate_commit_count=None,
+ intermediate_commit_size=None):
+ """Execute raw Javascript command in transaction.
+
+ :param command: JavaScript command to execute.
+ :type command: str | unicode
+ :param read: Names of collections read during transaction. If parameter
+ **allow_implicit** is set to True, any undeclared read collections
+ are loaded lazily.
+ :type read: [str | unicode]
+ :param write: Names of collections written to during transaction.
+ Transaction fails on undeclared write collections.
+ :type write: [str | unicode]
+ :param params: Optional parameters passed into the JavaScript command.
+ :type params: dict
+ :param sync: Block until operation is synchronized to disk.
+ :type sync: bool
+ :param timeout: Timeout for waiting on collection locks. If set to 0,
+ ArangoDB server waits indefinitely. If not set, system default
+ value is used.
+ :type timeout: int
+ :param max_size: Max transaction size limit in bytes. Applies only
+ to RocksDB storage engine.
+ :type max_size: int
+ :param allow_implicit: If set to True, undeclared read collections are
+ loaded lazily. If set to False, transaction fails on any undeclared
+ collections.
+ :type allow_implicit: bool
+ :param intermediate_commit_count: Max number of operations after which
+ an intermediate commit is performed automatically. Applies only to
+ RocksDB storage engine.
+ :type intermediate_commit_count: int
+ :param intermediate_commit_size: Max size of operations in bytes after
+ which an intermediate commit is performed automatically. Applies
+ only to RocksDB storage engine.
+ :type intermediate_commit_size: int
+ :return: Return value of **command**.
+ :rtype: str | unicode
+ :raise arango.exceptions.TransactionExecuteError: If execution fails.
+ """
+ collections = {'allowImplicit': allow_implicit}
+ if read is not None:
+ collections['read'] = read
+ if write is not None:
+ collections['write'] = write
+
+ data = {'action': command}
+ if collections:
+ data['collections'] = collections
+ if params is not None:
+ data['params'] = params
+ if timeout is not None:
+ data['lockTimeout'] = timeout
+ if sync is not None:
+ data['waitForSync'] = sync
+ if max_size is not None:
+ data['maxTransactionSize'] = max_size
+ if intermediate_commit_count is not None:
+ data['intermediateCommitCount'] = intermediate_commit_count
+ if intermediate_commit_size is not None:
+ data['intermediateCommitSize'] = intermediate_commit_size
+
+ request = Request(
+ method='post',
+ endpoint='/_api/transaction',
+ data=data
+ )
+
+ def response_handler(resp):
+ if not resp.is_success:
+ raise TransactionExecuteError(resp, request)
+ return resp.body.get('result')
+
+ return self._execute(request, response_handler)
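A sketch of ``execute_transaction``; the command runs server-side, so any collections it writes to must be declared up front (collection and key names are illustrative):

    result = db.execute_transaction(
        command='''
        function (params) {
            var db = require('@arangodb').db;
            db.students.insert({_key: params.key});
            return db.students.count();
        }''',
        params={'key': '123'},
        write=['students'],
        sync=True
    )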
def version(self):
- """Return the version of the ArangoDB server.
+ """Return ArangoDB server version.
- :returns: the server version
+ :return: Server version.
:rtype: str | unicode
- :raises arango.exceptions.ServerVersionError: if the server version
- cannot be retrieved
+ :raise arango.exceptions.ServerVersionError: If retrieval fails.
"""
- res = self._conn.get(
+ request = Request(
+ method='get',
endpoint='/_api/version',
params={'details': False}
)
- if res.status_code not in HTTP_OK:
- raise ServerVersionError(res)
- return res.body['version']
+
+ def response_handler(resp):
+ if not resp.is_success:
+ raise ServerVersionError(resp, request)
+ return resp.body['version']
+
+ return self._execute(request, response_handler)
def details(self):
- """Return the component details on the ArangoDB server.
+ """Return ArangoDB server details.
- :returns: the server details
+ :return: Server details.
:rtype: dict
- :raises arango.exceptions.ServerDetailsError: if the server details
- cannot be retrieved
+ :raise arango.exceptions.ServerDetailsError: If retrieval fails.
"""
- res = self._conn.get(
+ request = Request(
+ method='get',
endpoint='/_api/version',
params={'details': True}
)
- if res.status_code not in HTTP_OK:
- raise ServerDetailsError(res)
- return res.body['details']
+
+ def response_handler(resp):
+ if not resp.is_success:
+ raise ServerDetailsError(resp, request)
+ return resp.body['details']
+
+ return self._execute(request, response_handler)
def required_db_version(self):
- """Return the required version of the target database.
+ """Return required version of target database.
- :returns: the required version of the target database
+ :return: Required version of target database.
:rtype: str | unicode
- :raises arango.exceptions.ServerRequiredDBVersionError: if the
- required database version cannot be retrieved
+ :raise arango.exceptions.ServerRequiredDBVersionError: If retrieval
+ fails.
+ """
+ request = Request(
+ method='get',
+ endpoint='/_admin/database/target-version'
+ )
+
+ def response_handler(resp):
+ if not resp.is_success:
+ raise ServerRequiredDBVersionError(resp, request)
+ return resp.body['version']
+
+ return self._execute(request, response_handler)
+
+ def endpoints(self): # pragma: no cover
+ """Return coordinate endpoints. This method is for clusters only.
+
+ :return: List of endpoints.
+ :rtype: [str | unicode]
+ :raise arango.exceptions.ServerEndpointsError: If retrieval fails.
"""
- res = self._conn.get('/_admin/database/target-version')
- if res.status_code not in HTTP_OK:
- raise ServerRequiredDBVersionError(res)
- return res.body['version']
+ request = Request(
+ method='get',
+ endpoint='/_api/cluster/endpoints'
+ )
+
+ def response_handler(resp):
+ if not resp.is_success:
+ raise ServerEndpointsError(resp, request)
+ return [item['endpoint'] for item in resp.body['endpoints']]
+
+ return self._execute(request, response_handler)
+
+ def engine(self):
+ """Return the database engine details.
+
+ :return: Database engine details.
+ :rtype: dict
+ :raise arango.exceptions.ServerEngineError: If retrieval fails.
+ """
+ request = Request(
+ method='get',
+ endpoint='/_api/engine',
+ command='db._engine()'
+ )
+
+ def response_handler(resp):
+ if not resp.is_success:
+ raise ServerEngineError(resp, request)
+ return resp.body
+
+ return self._execute(request, response_handler)
+
+ def ping(self):
+ """Ping the ArangoDB server by sending a test request.
+
+ :return: Response code from server.
+ :rtype: int
+ :raise arango.exceptions.ServerConnectionError: If ping fails.
+ """
+ request = Request(
+ method='get',
+ endpoint='/_api/collection',
+ )
+
+ def response_handler(resp):
+ code = resp.status_code
+ if code in {401, 403}:
+ raise ServerConnectionError('bad username and/or password')
+ if not resp.is_success:
+ raise ServerConnectionError(
+ resp.error_message or 'bad server response')
+ return code
+
+ return self._execute(request, response_handler)
def statistics(self, description=False):
- """Return the server statistics.
+ """Return server statistics.
- :returns: the statistics information
+ :return: Server statistics.
:rtype: dict
- :raises arango.exceptions.ServerStatisticsError: if the server
- statistics cannot be retrieved
+ :raise arango.exceptions.ServerStatisticsError: If retrieval fails.
"""
- res = self._conn.get(
- '/_admin/statistics-description'
- if description else '/_admin/statistics'
+ if description:
+ url = '/_admin/statistics-description'
+ else:
+ url = '/_admin/statistics'
+
+ request = Request(
+ method='get',
+ endpoint=url
)
- if res.status_code not in HTTP_OK:
- raise ServerStatisticsError(res)
- res.body.pop('code', None)
- res.body.pop('error', None)
- return res.body
+
+ def response_handler(resp):
+ if not resp.is_success:
+ raise ServerStatisticsError(resp, request)
+ resp.body.pop('code', None)
+ resp.body.pop('error', None)
+ return resp.body
+
+ return self._execute(request, response_handler)
def role(self):
- """Return the role of the server in the cluster if any.
-
- :returns: the server role which can be ``"SINGLE"`` (the server is not
- in a cluster), ``"COORDINATOR"`` (the server is a coordinator in
- the cluster), ``"PRIMARY"`` (the server is a primary database in
- the cluster), ``"SECONDARY"`` (the server is a secondary database
- in the cluster) or ``"UNDEFINED"`` (the server role is undefined,
- the only possible value for a single server)
+ """Return server role in cluster.
+
+ :return: Server role. Possible values are "SINGLE" (server not in a
+ cluster), "COORDINATOR" (cluster coordinator), "PRIMARY" (primary
+ database server in a cluster), "SECONDARY" (secondary database
+ server in a cluster) or "UNDEFINED" (server role is undefined).
:rtype: str | unicode
- :raises arango.exceptions.ServerRoleError: if the server role cannot
- be retrieved
+ :raise arango.exceptions.ServerRoleError: If retrieval fails.
"""
- res = self._conn.get('/_admin/server/role')
- if res.status_code not in HTTP_OK:
- raise ServerRoleError(res)
- return res.body.get('role')
+ request = Request(
+ method='get',
+ endpoint='/_admin/server/role'
+ )
+
+ def response_handler(resp):
+ if not resp.is_success:
+ raise ServerRoleError(resp, request)
+ return resp.body.get('role')
+
+ return self._execute(request, response_handler)
def time(self):
- """Return the current server system time.
+ """Return server system time.
- :returns: the server system time
+ :return: Server system time.
:rtype: datetime.datetime
- :raises arango.exceptions.ServerTimeError: if the server time
- cannot be retrieved
+ :raise arango.exceptions.ServerTimeError: If retrieval fails.
"""
- res = self._conn.get('/_admin/time')
- if res.status_code not in HTTP_OK:
- raise ServerTimeError(res)
- return datetime.fromtimestamp(res.body['time'])
+ request = Request(
+ method='get',
+ endpoint='/_admin/time'
+ )
+
+ def response_handler(resp):
+ if not resp.is_success:
+ raise ServerTimeError(resp, request)
+ return datetime.fromtimestamp(resp.body['time'])
+
+ return self._execute(request, response_handler)
def echo(self):
- """Return information on the last request (headers, payload etc.)
+ """Return details of the last request (e.g. headers, payload).
- :returns: the details of the last request
+ :return: Details of the last request.
:rtype: dict
- :raises arango.exceptions.ServerEchoError: if the last request cannot
- be retrieved from the server
+ :raise arango.exceptions.ServerEchoError: If retrieval fails.
"""
- res = self._conn.get('/_admin/echo')
- if res.status_code not in HTTP_OK:
- raise ServerEchoError(res)
- return res.body
+ request = Request(
+ method='get',
+ endpoint='/_admin/echo'
+ )
- def sleep(self, seconds):
- """Suspend the execution for a specified duration before returning.
+ def response_handler(resp):
+ if not resp.is_success:
+ raise ServerEchoError(resp, request)
+ return resp.body
- :param seconds: the number of seconds to suspend
- :type seconds: int
- :returns: the number of seconds suspended
- :rtype: int
- :raises arango.exceptions.ServerSleepError: if the server cannot be
- suspended
- """
- res = self._conn.get(
- '/_admin/sleep',
- params={'duration': seconds}
- )
- if res.status_code not in HTTP_OK:
- raise ServerSleepError(res)
- return res.body['duration']
+ return self._execute(request, response_handler)
def shutdown(self): # pragma: no cover
- """Initiate the server shutdown sequence.
+ """Initiate server shutdown sequence.
- :returns: whether the server was shutdown successfully
+ :return: True if the server was shut down successfully.
:rtype: bool
- :raises arango.exceptions.ServerShutdownError: if the server shutdown
- sequence cannot be initiated
+ :raise arango.exceptions.ServerShutdownError: If shutdown fails.
"""
- try:
- res = self._conn.delete('/_admin/shutdown')
- except ConnectionError:
- return False
- if res.status_code not in HTTP_OK:
- raise ServerShutdownError(res)
- return True
+ request = Request(
+ method='delete',
+ endpoint='/_admin/shutdown'
+ )
+
+ def response_handler(resp):
+ if not resp.is_success:
+ raise ServerShutdownError(resp, request)
+ return True
+
+ return self._execute(request, response_handler)
def run_tests(self, tests): # pragma: no cover
- """Run the available unittests on the server.
+ """Run available unittests on the server.
- :param tests: list of files containing the test suites
- :type tests: list
- :returns: the test results
+ :param tests: List of files containing the test suites.
+ :type tests: [str | unicode]
+ :return: Test results.
:rtype: dict
- :raises arango.exceptions.ServerRunTestsError: if the test suites fail
+ :raise arango.exceptions.ServerRunTestsError: If execution fails.
"""
- res = self._conn.post('/_admin/test', data={'tests': tests})
- if res.status_code not in HTTP_OK:
- raise ServerRunTestsError(res)
- return res.body
+ request = Request(
+ method='post',
+ endpoint='/_admin/test',
+ data={'tests': tests}
+ )
- def execute(self, program): # pragma: no cover
- """Execute a Javascript program on the server.
+ def response_handler(resp):
+ if not resp.is_success:
+ raise ServerRunTestsError(resp, request)
+ return resp.body
- :param program: the body of the Javascript program to execute.
- :type program: str | unicode
- :returns: the result of the execution
- :rtype: str | unicode
- :raises arango.exceptions.ServerExecuteError: if the program cannot
- be executed on the server
- """
- res = self._conn.post('/_admin/execute', data=program)
- if res.status_code not in HTTP_OK:
- raise ServerExecuteError(res)
- return res.body
+ return self._execute(request, response_handler)
def read_log(self,
upto=None,
@@ -264,34 +511,32 @@ def read_log(self,
offset=None,
search=None,
sort=None):
- """Read the global log from the server.
+ """Read the global log from server.
- :param upto: return the log entries up to the given level (mutually
- exclusive with argument **level**), which must be ``"fatal"``,
- ``"error"``, ``"warning"``, ``"info"`` (default) or ``"debug"``
+ :param upto: Return the log entries up to the given level (mutually
+ exclusive with parameter **level**). Allowed values are "fatal",
+ "error", "warning", "info" (default) and "debug".
:type upto: str | unicode | int
- :param level: return the log entries of only the given level (mutually
- exclusive with **upto**), which must be ``"fatal"``, ``"error"``,
- ``"warning"``, ``"info"`` (default) or ``"debug"``
+ :param level: Return the log entries of only the given level (mutually
+ exclusive with **upto**). Allowed values are "fatal", "error",
+ "warning", "info" (default) and "debug".
:type level: str | unicode | int
- :param start: return the log entries whose ID is greater or equal to
- the given value
+ :param start: Return the log entries whose ID is greater or equal to
+ the given value.
:type start: int
- :param size: restrict the size of the result to the given value (this
- setting can be used for pagination)
+ :param size: Restrict the size of the result to the given value. This
+ can be used for pagination.
:type size: int
- :param offset: the number of entries to skip initially (this setting
- can be setting can be used for pagination)
+ :param offset: Number of entries to skip (e.g. for pagination).
:type offset: int
- :param search: return only the log entries containing the given text
+ :param search: Return only the log entries containing the given text.
:type search: str | unicode
- :param sort: sort the log entries according to the given fashion, which
- can be ``"sort"`` or ``"desc"``
+ :param sort: Sort the log entries according to the given fashion, which
+ can be "sort" or "desc".
:type sort: str | unicode
- :returns: the server log entries
+ :return: Server log entries.
:rtype: dict
- :raises arango.exceptions.ServerReadLogError: if the server log entries
- cannot be read
+ :raise arango.exceptions.ServerReadLogError: If read fails.
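+
+ A usage sketch (the filter values are illustrative):
+
+ .. code-block:: python
+
+     # Fetch the ten most recent error entries.
+     entries = db.read_log(level='error', size=10, sort='desc')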
"""
params = dict()
if upto is not None:
@@ -308,26 +553,39 @@ def read_log(self,
params['search'] = search
if sort is not None:
params['sort'] = sort
- res = self._conn.get('/_admin/log')
- if res.status_code not in HTTP_OK:
- raise ServerReadLogError(res)
- if 'totalAmount' in res.body:
- res.body['total_amount'] = res.body.pop('totalAmount')
- return res.body
+
+ request = Request(
+ method='get',
+ endpoint='/_admin/log',
+ params=params
+ )
+
+ def response_handler(resp):
+ if not resp.is_success:
+ raise ServerReadLogError(resp, request)
+ if 'totalAmount' in resp.body:
+ resp.body['total_amount'] = resp.body.pop('totalAmount')
+ return resp.body
+
+ return self._execute(request, response_handler)
def log_levels(self):
- """Return the current logging levels.
+ """Return current logging levels.
- :return: the current logging levels
+ :return: Current logging levels.
:rtype: dict
-
- .. note::
- This method is only compatible with ArangoDB version 3.1+ only.
+ :raise arango.exceptions.ServerLogLevelError: If retrieval fails.
"""
- res = self._conn.get('/_admin/log/level')
- if res.status_code not in HTTP_OK:
- raise ServerLogLevelError(res)
- return res.body
+ request = Request(
+ method='get',
+ endpoint='/_admin/log/level'
+ )
+
+ def response_handler(resp):
+ if not resp.is_success:
+ raise ServerLogLevelError(resp, request)
+ return resp.body
+
+ return self._execute(request, response_handler)
def set_log_levels(self, **kwargs):
"""Set the logging levels.
@@ -337,219 +595,203 @@ def set_log_levels(self, **kwargs):
.. code-block:: python
- arango.set_log_level(
+ arango.set_log_levels(
agency='DEBUG',
collector='INFO',
threads='WARNING'
)
- :return: the new logging levels
+ Keys that are not valid logger names are ignored.
+
+ :return: New logging levels.
:rtype: dict
+ """
+ request = Request(
+ method='put',
+ endpoint='/_admin/log/level',
+ data=kwargs
+ )
- .. note::
- Keys that are not valid logger names are simply ignored.
+ def response_handler(resp):
+ if not resp.is_success:
+ raise ServerLogLevelSetError(resp, request)
+ return resp.body
- .. note::
- This method is only compatible with ArangoDB version 3.1+ only.
- """
- res = self._conn.put('/_admin/log/level', data=kwargs)
- if res.status_code not in HTTP_OK:
- raise ServerLogLevelSetError(res)
- return res.body
+ return self._execute(request, response_handler)
def reload_routing(self):
- """Reload the routing information from the collection *routing*.
+ """Reload the routing information.
- :returns: whether the routing was reloaded successfully
+ :return: True if routing was reloaded successfully.
:rtype: bool
- :raises arango.exceptions.ServerReloadRoutingError: if the routing
- cannot be reloaded
+ :raise arango.exceptions.ServerReloadRoutingError: If reload fails.
"""
- res = self._conn.post('/_admin/routing/reload')
- if res.status_code not in HTTP_OK:
- raise ServerReloadRoutingError(res)
- return not res.body['error']
+ request = Request(
+ method='post',
+ endpoint='/_admin/routing/reload'
+ )
- def async(self, return_result=True):
- """Return the asynchronous request object.
+ def response_handler(resp):
+ if not resp.is_success:
+ raise ServerReloadRoutingError(resp, request)
+ return 'error' not in resp.body
- Refer to :class:`arango.async.AsyncExecution` for more information.
+ return self._execute(request, response_handler)
- :param return_result: store and return the result
- :type return_result: bool
- :returns: the async request object
- :rtype: arango.async.AsyncExecution
+ #######################
+ # Database Management #
+ #######################
- .. warning::
- This method will be deprecated in the future! Use
- :func:`arango.database.Database.asynchronous` instead.
+ def databases(self):
+ """Return the names all databases.
+
+ :return: Database names.
+ :rtype: [str | unicode]
+ :raise arango.exceptions.DatabaseListError: If retrieval fails.
"""
- return AsyncExecution(self._conn, return_result)
+ request = Request(
+ method='get',
+ endpoint='/_api/database'
+ )
- def asynchronous(self, return_result=True):
- """Return the asynchronous request object.
+ def response_handler(resp):
+ if not resp.is_success:
+ raise DatabaseListError(resp, request)
+ return resp.body['result']
- Refer to :class:`arango.async.AsyncExecution` for more information.
+ return self._execute(request, response_handler)
- :param return_result: store and return the result
- :type return_result: bool
- :returns: the async request object
- :rtype: arango.async.AsyncExecution
+ def has_database(self, name):
+ """Check if a database exists.
+
+ :param name: Database name.
+ :type name: str | unicode
+ :return: True if database exists, False otherwise.
+ :rtype: bool
"""
- return AsyncExecution(self._conn, return_result)
+ return name in self.databases()
- def batch(self, return_result=True, commit_on_error=True):
- """Return the batch request object.
+ def create_database(self, name, users=None):
+ """Create a new database.
- Refer to :class:`arango.batch.BatchExecution` for more information.
+ :param name: Database name.
+ :type name: str | unicode
+ :param users: List of users with access to the new database, where each
+ user is a dictionary with fields "username", "password", "active"
+ and "extra" (see below for example). If not set, only the admin and
+ current user are granted access.
+ :type users: [dict]
+ :return: True if database was created successfully.
+ :rtype: bool
+ :raise arango.exceptions.DatabaseCreateError: If create fails.
- :param return_result: store and return the result
- :type return_result: bool
- :param commit_on_error: commit when an exception is raised
- (this is only applicable when context managers are used)
- :returns: the batch request object
- :rtype: arango.batch.BatchExecution
- """
- return BatchExecution(self._conn, return_result, commit_on_error)
-
- def transaction(self,
- read=None,
- write=None,
- sync=None,
- timeout=None,
- commit_on_error=True):
- """Return the transaction object.
-
- Refer to :class:`arango.transaction.Transaction` for more information.
-
- :param read: the name(s) of the collection(s) to read from
- :type read: str | unicode | list
- :param write: the name(s) of the collection(s) to write to
- :type write: str | unicode | list
- :param sync: wait for the operation to sync to disk
- :type sync: bool
- :param timeout: timeout on the collection locks
- :type timeout: int
- :param commit_on_error: only applicable when *context managers* are
- used to execute the transaction: if ``True``, the requests
- queued so far are committed even if an exception is raised before
- exiting out of the context
- :type commit_on_error: bool
+ Here is an example entry for parameter **users**:
+
+ .. code-block:: python
+
+ {
+ 'username': 'john',
+ 'password': 'password',
+ 'active': True,
+ 'extra': {'Department': 'IT'}
+ }
"""
- return Transaction(
- connection=self._conn,
- read=read,
- write=write,
- timeout=timeout,
- sync=sync,
- commit_on_error=commit_on_error
+ data = {'name': name}
+ if users is not None:
+ data['users'] = [{
+ 'username': user['username'],
+ 'passwd': user['password'],
+ 'active': user.get('active', True),
+ 'extra': user.get('extra', {})
+ } for user in users]
+
+ request = Request(
+ method='post',
+ endpoint='/_api/database',
+ data=data
)
- def cluster(self, shard_id, transaction_id=None, timeout=None, sync=None):
- """Return the cluster round-trip test object.
+ def response_handler(resp):
+ if not resp.is_success:
+ raise DatabaseCreateError(resp, request)
+ return True
- :param shard_id: the ID of the shard to which the request is sent
- :type shard_id: str | unicode | int
- :param transaction_id: the transaction ID for the request
- :type transaction_id: str | unicode | int
- :param timeout: the timeout in seconds for the cluster operation, where
- an error is returned if the response does not arrive within the
- given limit (default: 24 hrs)
- :type timeout: int
- :param sync: if set to ``True``, the test uses synchronous mode,
- otherwise asynchronous mode is used (this is mainly for debugging
- purposes)
- :param sync: bool
+ return self._execute(request, response_handler)
+
+ def delete_database(self, name, ignore_missing=False):
+ """Delete the database.
+
+ :param name: Database name.
+ :type name: str | unicode
+ :param ignore_missing: Do not raise an exception on missing database.
+ :type ignore_missing: bool
+ :return: True if database was deleted successfully, False if database
+ was not found and **ignore_missing** was set to True.
+ :rtype: bool
+ :raise arango.exceptions.DatabaseDeleteError: If delete fails.
"""
- return ClusterTest(
- connection=self._conn,
- shard_id=shard_id,
- transaction_id=transaction_id,
- timeout=timeout,
- sync=sync
+ request = Request(
+ method='delete',
+ endpoint='/_api/database/{}'.format(name)
)
- def properties(self):
- """Return the database properties.
+ def response_handler(resp):
+ if resp.error_code == 1228 and ignore_missing:
+ return False
+ if not resp.is_success:
+ raise DatabaseDeleteError(resp, request)
+ return resp.body['result']
- :returns: the database properties
- :rtype: dict
- :raises arango.exceptions.DatabasePropertiesError: if the properties
- of the database cannot be retrieved
- """
- res = self._conn.get('/_api/database/current')
- if res.status_code not in HTTP_OK:
- raise DatabasePropertiesError(res)
- result = res.body['result']
- result['system'] = result.pop('isSystem')
- return result
-
- def get_document(self, id, rev=None, match_rev=True):
- """Retrieve a document by its ID (collection/key)
-
- :param id: the document ID
- :type id: str | unicode
- :returns: the document or ``None`` if the document is missing
- :rtype: dict
- :param rev: the revision to compare with that of the retrieved document
- :type rev: str | unicode
- :param match_rev: if ``True``, check if the given revision and
- the target document's revisions are the same, otherwise check if
- the revisions are different (this flag has an effect only when
- **rev** is given)
- :type match_rev: bool
- :raises arango.exceptions.DocumentRevisionError: if the given revision
- does not match the revision of the retrieved document
- :raises arango.exceptions.DocumentGetError: if the document cannot
- be retrieved from the collection
- """
- res = self._conn.get(
- '/_api/document/{}'.format(id),
- headers=(
- {'If-Match' if match_rev else 'If-None-Match': rev}
- if rev is not None else {}
- )
- )
- if res.status_code in {304, 412}:
- raise DocumentRevisionError(res)
- elif res.status_code == 404 and res.error_code == 1202:
- return None
- elif res.status_code in HTTP_OK:
- return res.body
- raise DocumentGetError(res)
+ return self._execute(request, response_handler)
#########################
# Collection Management #
#########################
+ def collection(self, name):
+ """Return the standard collection API wrapper.
+
+ :param name: Collection name.
+ :type name: str | unicode
+ :return: Standard collection API wrapper.
+ :rtype: arango.collection.StandardCollection
+ """
+ return StandardCollection(self._conn, self._executor, name)
+
+ def has_collection(self, name):
+ """Check if collection exists in the database.
+
+ :param name: Collection name.
+ :type name: str | unicode
+ :return: True if collection exists, False otherwise.
+ :rtype: bool
+ """
+ return any(col['name'] == name for col in self.collections())
+
def collections(self):
"""Return the collections in the database.
- :returns: the details of the collections in the database
+ :return: Collections in the database and their details.
:rtype: [dict]
- :raises arango.exceptions.CollectionListError: if the list of
- collections cannot be retrieved
- """
- res = self._conn.get('/_api/collection')
- if res.status_code not in HTTP_OK:
- raise CollectionListError(res)
- return [{
- 'id': col['id'],
- 'name': col['name'],
- 'system': col['isSystem'],
- 'type': Collection.TYPES[col['type']],
- 'status': Collection.STATUSES[col['status']],
- } for col in map(dict, res.body['result'])]
+ :raise arango.exceptions.CollectionListError: If retrieval fails.
+ """
+ request = Request(
+ method='get',
+ endpoint='/_api/collection'
+ )
- def collection(self, name):
- """Return the collection object.
+ def response_handler(resp):
+ if not resp.is_success:
+ raise CollectionListError(resp, request)
+ return [{
+ 'id': col['id'],
+ 'name': col['name'],
+ 'system': col['isSystem'],
+ 'type': StandardCollection.types[col['type']],
+ 'status': StandardCollection.statuses[col['status']],
+ } for col in map(dict, resp.body['result'])]
- :param name: the name of the collection
- :type name: str | unicode
- :returns: the collection object
- :rtype: arango.collections.Collection
- """
- return Collection(self._conn, name)
+ return self._execute(request, response_handler)
def create_collection(self,
name,
@@ -562,72 +804,68 @@ def create_collection(self,
user_keys=True,
key_increment=None,
key_offset=None,
- key_generator="traditional",
+ key_generator='traditional',
shard_fields=None,
shard_count=None,
index_bucket_count=None,
replication_factor=None):
"""Create a new collection.
- .. note::
-
- Starting from ArangoDB version 3.1+, system collections must have
- a name with a leading underscore ``_`` character.
-
- :param name: the name of the collection
+ :param name: Collection name.
:type name: str | unicode
- :param sync: wait for the operation to sync to disk
+ :param sync: If set to True, document operations via the collection
+ will block until synchronized to disk by default.
:type sync: bool
- :param compact: compact the collection
+ :param compact: If set to True, the collection is compacted. Applies
+ only to MMFiles storage engine.
:type compact: bool
- :param system: the collection is a system collection
+ :param system: If set to True, a system collection is created. The
+ collection name must have a leading underscore "_" character.
:type system: bool
- :param journal_size: the max size of the journal
+ :param journal_size: Max size of the journal in bytes.
:type journal_size: int
- :param edge: the collection is an edge collection
+ :param edge: If set to True, an edge collection is created.
:type edge: bool
- :param volatile: the collection is in-memory only
+ :param volatile: If set to True, collection data is kept in-memory only
+ and not made persistent. Unloading the collection will cause the
+ collection data to be discarded. Stopping or re-starting the server
+ will also cause full loss of data.
:type volatile: bool
- :param key_generator: "traditional" or "autoincrement"
+ :param key_generator: Used for generating document keys. Allowed values
+ are "traditional" or "autoincrement".
:type key_generator: str | unicode
- :param user_keys: allow users to supply keys
+ :param user_keys: If set to True, users are allowed to supply document
+ keys. If set to False, the key generator is solely responsible for
+ supplying the key values.
:type user_keys: bool
- :param key_increment: the increment value (autoincrement only)
+ :param key_increment: Key increment value. Applies only when value of
+ **key_generator** is set to "autoincrement".
:type key_increment: int
- :param key_offset: the offset value (autoincrement only)
+ :param key_offset: Key offset value. Applies only when value of
+ **key_generator** is set to "autoincrement".
:type key_offset: int
- :param shard_fields: the field(s) used to determine the target shard
- :type shard_fields: list
- :param shard_count: the number of shards to create
+ :param shard_fields: Field(s) used to determine the target shard.
+ :type shard_fields: [str | unicode]
+ :param shard_count: Number of shards to create.
:type shard_count: int
- :param index_bucket_count: the number of buckets into which indexes
- using a hash table are split (the default is 16 and this number
- has to be a power of 2 and less than or equal to 1024); for very
- large collections one should increase this to avoid long pauses
- when the hash table has to be initially built or re-sized, since
- buckets are re-sized individually and can be initially built in
- parallel (e.g. 64 might be a sensible value for a collection with
- 100,000,000 documents.
+ :param index_bucket_count: Number of buckets into which indexes using
+ hash tables are split. The default is 16, and this number has to be
+ a power of 2 and less than or equal to 1024. For large collections,
+ one should increase this to avoid long pauses when the hash table
+ has to be initially built or re-sized, since buckets are re-sized
+ individually and can be initially built in parallel. For instance,
+ 64 may be a sensible value for 100 million documents.
:type index_bucket_count: int
- :param replication_factor: the number of copies of each shard on
- different servers in a cluster, whose allowed values are:
-
- .. code-block:: none
-
- 1: only one copy is kept (no synchronous replication).
-
- k: k-1 replicas are kept and any two copies are replicated
- across different DBServers synchronously, meaning every
- write to the master is copied to all slaves before the
- operation is reported successful.
-
- Default: ``1``.
-
+ :param replication_factor: Number of copies of each shard on different
+ servers in a cluster. Allowed values are 1 (only one copy is kept
+ and no synchronous replication), and n (n-1 replicas are kept and
+ any two copies are replicated across servers synchronously, meaning
+ every write to the master is copied to all slaves before operation
+ is reported successful).
:type replication_factor: int
- :returns: the new collection object
- :rtype: arango.collections.Collection
- :raises arango.exceptions.CollectionCreateError: if the collection
- cannot be created in the database
+ :return: Standard collection API wrapper.
+ :rtype: arango.collection.StandardCollection
+ :raise arango.exceptions.CollectionCreateError: If create fails.
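+
+ A minimal usage sketch (the collection name and the autoincrement
+ settings are illustrative):
+
+ .. code-block:: python
+
+     students = db.create_collection(
+         name='students',
+         key_generator='autoincrement',
+         key_increment=10,
+         key_offset=100
+     )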
"""
key_options = {'type': key_generator, 'allowUserKeys': user_keys}
if key_increment is not None:
@@ -641,9 +879,14 @@ def create_collection(self,
'doCompact': compact,
'isSystem': system,
'isVolatile': volatile,
- 'type': 3 if edge else 2,
'keyOptions': key_options
}
+
+ if edge:
+ data['type'] = 3
+ else:
+ data['type'] = 2
+
if journal_size is not None:
data['journalSize'] = journal_size
if shard_count is not None:
@@ -655,74 +898,111 @@ def create_collection(self,
if replication_factor is not None:
data['replicationFactor'] = replication_factor
- res = self._conn.post('/_api/collection', data=data)
- if res.status_code not in HTTP_OK:
- raise CollectionCreateError(res)
- return self.collection(name)
+ request = Request(
+ method='post',
+ endpoint='/_api/collection',
+ data=data
+ )
+
+ def response_handler(resp):
+ if resp.is_success:
+ return self.collection(name)
+ raise CollectionCreateError(resp, request)
+
+ return self._execute(request, response_handler)
def delete_collection(self, name, ignore_missing=False, system=None):
- """Delete a collection.
+ """Delete the collection.
- :param name: the name of the collection to delete
+ :param name: Collection name.
:type name: str | unicode
- :param ignore_missing: do not raise if the collection is missing
+ :param ignore_missing: Do not raise an exception on missing collection.
:type ignore_missing: bool
- :param system: whether the collection is a system collection (this
- option is only available with ArangoDB 3.1+, lower versions do
- distinguish between system or non-system collections)
+ :param system: Whether the collection is a system collection.
:type system: bool
- :returns: whether the deletion was successful
+ :return: True if collection was deleted successfully, False if
+ collection was not found and **ignore_missing** was set to True.
:rtype: bool
- :raises arango.exceptions.CollectionDeleteError: if the collection
- cannot be deleted from the database
+ :raise arango.exceptions.CollectionDeleteError: If delete fails.
"""
- res = self._conn.delete(
- '/_api/collection/{}'.format(name),
- params={'isSystem': system}
- if system is not None else None # pragma: no cover
+ params = {}
+ if system is not None:
+ params['isSystem'] = system
+
+ request = Request(
+ method='delete',
+ endpoint='/_api/collection/{}'.format(name),
+ params=params
)
- if res.status_code not in HTTP_OK:
- if not (res.status_code == 404 and ignore_missing):
- raise CollectionDeleteError(res)
- return not res.body['error']
+
+ def response_handler(resp):
+ if resp.error_code == 1203 and ignore_missing:
+ return False
+ if not resp.is_success:
+ raise CollectionDeleteError(resp, request)
+ return True
+
+ return self._execute(request, response_handler)
####################
# Graph Management #
####################
- def graphs(self):
- """List all graphs in the database.
+ def graph(self, name):
+ """Return the graph API wrapper.
- :returns: the graphs in the database
- :rtype: dict
- :raises arango.exceptions.GraphListError: if the list of graphs
- cannot be retrieved
+ :param name: Graph name.
+ :type name: str | unicode
+ :return: Graph API wrapper.
+ :rtype: arango.graph.Graph
"""
- res = self._conn.get('/_api/gharial')
- if res.status_code not in HTTP_OK:
- raise GraphListError(res)
+ return Graph(self._conn, self._executor, name)
- return [
- {
- 'name': record['_key'],
- 'revision': record['_rev'],
- 'edge_definitions': record['edgeDefinitions'],
- 'orphan_collections': record['orphanCollections'],
- 'smart': record.get('isSmart'),
- 'smart_field': record.get('smartGraphAttribute'),
- 'shard_count': record.get('numberOfShards')
- } for record in map(dict, res.body['graphs'])
- ]
+ def has_graph(self, name):
+ """Check if a graph exists in the database.
- def graph(self, name):
- """Return the graph object.
-
- :param name: the name of the graph
+ :param name: Graph name.
:type name: str | unicode
- :returns: the requested graph object
- :rtype: arango.graph.Graph
+ :return: True if graph exists, False otherwise.
+ :rtype: bool
"""
- return Graph(self._conn, name)
+ return any(graph['name'] == name for graph in self.graphs())
+
+ def graphs(self):
+ """List all graphs in the database.
+
+ :return: Graphs in the database.
+ :rtype: [dict]
+ :raise arango.exceptions.GraphListError: If retrieval fails.
+ """
+ request = Request(method='get', endpoint='/_api/gharial')
+
+ def response_handler(resp):
+ if not resp.is_success:
+ raise GraphListError(resp, request)
+ return [
+ {
+ 'id': body['_id'],
+ 'name': body['_key'],
+ 'revision': body['_rev'],
+ 'orphan_collections': body['orphanCollections'],
+ 'edge_definitions': [
+ {
+ 'edge_collection': definition['collection'],
+ 'from_vertex_collections': definition['from'],
+ 'to_vertex_collections': definition['to'],
+ }
+ for definition in body['edgeDefinitions']
+ ],
+ 'shard_count': body.get('numberOfShards'),
+ 'replication_factor': body.get('replicationFactor')
+ } for body in resp.body['graphs']
+ ]
+
+ return self._execute(request, response_handler)
def create_graph(self,
name,
@@ -731,48 +1011,53 @@ def create_graph(self,
smart=None,
smart_field=None,
shard_count=None):
- """Create a new graph in the database.
-
- An edge definition should look like this:
-
- .. code-block:: python
-
- {
- 'name': 'edge_collection_name',
- 'from_collections': ['from_vertex_collection_name'],
- 'to_collections': ['to_vertex_collection_name']
- }
+ """Create a new graph.
- :param name: The name of the new graph.
+ :param name: Graph name.
:type name: str | unicode
- :param edge_definitions: The list of edge definitions.
- :type edge_definitions: list
- :param orphan_collections: The names of additional vertex collections.
- :type orphan_collections: list
- :param smart: Whether or not the graph is smart. Set this to ``True``
- to enable sharding (see parameter **smart_field** below). This
- parameter only has an effect for the enterprise version of ArangoDB.
+ :param edge_definitions: List of edge definitions, where each edge
+ definition entry is a dictionary with fields "edge_collection",
+ "from_vertex_collections" and "to_vertex_collections" (see below
+ for example).
+ :type edge_definitions: [dict]
+ :param orphan_collections: Names of additional vertex collections that
+ are not in edge definitions.
+ :type orphan_collections: [str | unicode]
+ :param smart: If set to True, sharding is enabled (see parameter
+ **smart_field** below). Applies only to enterprise version of
+ ArangoDB.
:type smart: bool
- :param smart_field: The document field used to shard the vertices of
- the graph. To use this option, parameter **smart** must be set to
- ``True`` and every vertex in the graph must contain the smart field.
+ :param smart_field: Document field used to shard the vertices of the
+ graph. To use this, parameter **smart** must be set to True and
+ every vertex in the graph must have the smart field. Applies only
+ to enterprise version of ArangoDB.
:type smart_field: str | unicode
- :param shard_count: The number of shards used for every collection in
- the graph. To use this option, parameter **smart** must be set to
- ``True`` and every vertex in the graph must contain the smart field.
- This number cannot be modified later once set.
+ :param shard_count: Number of shards used for every collection in the
+ graph. To use this, parameter **smart** must be set to True and
+ every vertex in the graph must have the smart field. This number
+ cannot be modified later once set. Applies only to enterprise
+ version of ArangoDB.
:type shard_count: int
- :returns: the graph object
+ :return: Graph API wrapper.
:rtype: arango.graph.Graph
- :raises arango.exceptions.GraphCreateError: if the graph cannot be
- created in the database
+ :raise arango.exceptions.GraphCreateError: If create fails.
+
+ Here is an example entry for parameter **edge_definitions**:
+
+ .. code-block:: python
+
+ {
+ 'edge_collection': 'teach',
+ 'from_vertex_collections': ['teachers'],
+ 'to_vertex_collections': ['lectures']
+ }
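+
+ A call using the entry above might look like this (the graph and
+ collection names are illustrative):
+
+ .. code-block:: python
+
+     school = db.create_graph('school', edge_definitions=[{
+         'edge_collection': 'teach',
+         'from_vertex_collections': ['teachers'],
+         'to_vertex_collections': ['lectures']
+     }])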
"""
data = {'name': name}
if edge_definitions is not None:
data['edgeDefinitions'] = [{
- 'collection': definition['name'],
- 'from': definition['from_collections'],
- 'to': definition['to_collections']
+ 'collection': definition['edge_collection'],
+ 'from': definition['from_vertex_collections'],
+ 'to': definition['to_vertex_collections']
} for definition in edge_definitions]
if orphan_collections is not None:
data['orphanCollections'] = orphan_collections
@@ -783,76 +1068,318 @@ def create_graph(self,
if shard_count is not None: # pragma: no cover
data['numberOfShards'] = shard_count
- res = self._conn.post('/_api/gharial', data=data)
- if res.status_code not in HTTP_OK:
- raise GraphCreateError(res)
- return Graph(self._conn, name)
+ request = Request(
+ method='post',
+ endpoint='/_api/gharial',
+ data=data
+ )
+
+ def response_handler(resp):
+ if resp.is_success:
+ return Graph(self._conn, self._executor, name)
+ raise GraphCreateError(resp, request)
+
+ return self._execute(request, response_handler)
def delete_graph(self, name, ignore_missing=False, drop_collections=None):
"""Drop the graph of the given name from the database.
- :param name: The name of the graph to delete/drop.
+ :param name: Graph name.
:type name: str | unicode
- :param ignore_missing: Ignore HTTP 404 (graph not found) from the
- server. If this is set to ``True`` an exception is not raised.
+ :param ignore_missing: Do not raise an exception on missing graph.
:type ignore_missing: bool
- :param drop_collections: Whether to drop the collections of the graph
- as well. The collections can only be dropped if they are not in use
- by other graphs.
+ :param drop_collections: Drop the collections of the graph as well.
+ Collections are dropped only if they are not in use by other graphs.
:type drop_collections: bool
- :returns: Whether the deletion was successful.
+ :return: True if graph was deleted successfully, False if graph was not
+ found and **ignore_missing** was set to True.
:rtype: bool
- :raises arango.exceptions.GraphDeleteError: if the graph cannot be
- deleted from the database
+ :raise arango.exceptions.GraphDeleteError: If delete fails.
"""
params = {}
if drop_collections is not None:
params['dropCollections'] = drop_collections
- res = self._conn.delete(
- '/_api/gharial/{}'.format(name),
+ request = Request(
+ method='delete',
+ endpoint='/_api/gharial/{}'.format(name),
params=params
)
- if res.status_code not in HTTP_OK:
- if not (res.status_code == 404 and ignore_missing):
- raise GraphDeleteError(res)
- return not res.body['error']
+
+ def response_handler(resp):
+ if resp.error_code == 1924 and ignore_missing:
+ return False
+ if not resp.is_success:
+ raise GraphDeleteError(resp, request)
+ return True
+
+ return self._execute(request, response_handler)
+
+ #######################
+ # Document Management #
+ #######################
+
+ def has_document(self, document, rev=None, check_rev=True):
+ """Check if a document exists.
+
+ :param document: Document ID or body with "_id" field.
+ :type document: str | unicode | dict
+ :param rev: Expected document revision. Overrides value of "_rev" field
+ in **document** if present.
+ :type rev: str | unicode
+ :param check_rev: If set to True, revision of **document** (if given)
+ is compared against the revision of target document.
+ :type check_rev: bool
+ :return: True if document exists, False otherwise.
+ :rtype: bool
+ :raise arango.exceptions.DocumentInError: If check fails.
+ :raise arango.exceptions.DocumentRevisionError: If revisions mismatch.
+ """
+ return self._get_col_by_doc(document).has(
+ document=document,
+ rev=rev,
+ check_rev=check_rev
+ )
+
+ def document(self, document, rev=None, check_rev=True):
+ """Return a document.
+
+ :param document: Document ID or body with "_id" field.
+ :type document: str | unicode | dict
+ :param rev: Expected document revision. Overrides the value of "_rev"
+ field in **document** if present.
+ :type rev: str | unicode
+ :param check_rev: If set to True, revision of **document** (if given)
+ is compared against the revision of target document.
+ :type check_rev: bool
+ :return: Document, or None if not found.
+ :rtype: dict | None
+ :raise arango.exceptions.DocumentGetError: If retrieval fails.
+ :raise arango.exceptions.DocumentRevisionError: If revisions mismatch.
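+
+ A usage sketch (the document ID is hypothetical):
+
+ .. code-block:: python
+
+     student = db.document('students/john')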
+ """
+ return self._get_col_by_doc(document).get(
+ document=document,
+ rev=rev,
+ check_rev=check_rev
+ )
+
+ def insert_document(self,
+ collection,
+ document,
+ return_new=False,
+ sync=None,
+ silent=False):
+ """Insert a new document.
+
+ :param collection: Collection name.
+ :type collection: str | unicode
+ :param document: Document to insert. If it contains the "_key" or "_id"
+ field, the value is used as the key of the new document (otherwise
+ it is auto-generated). Any "_rev" field is ignored.
+ :type document: dict
+ :param return_new: Include body of the new document in the returned
+ metadata. Ignored if parameter **silent** is set to True.
+ :type return_new: bool
+ :param sync: Block until operation is synchronized to disk.
+ :type sync: bool
+ :param silent: If set to True, no document metadata is returned. This
+ can be used to save resources.
+ :type silent: bool
+ :return: Document metadata (e.g. document key, revision) or True if
+ parameter **silent** was set to True.
+ :rtype: bool | dict
+ :raise arango.exceptions.DocumentInsertError: If insert fails.
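+
+ A usage sketch (the collection name and fields are illustrative):
+
+ .. code-block:: python
+
+     metadata = db.insert_document(
+         'students',
+         {'_key': 'john', 'gpa': 3.6}
+     )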
+ """
+ return self.collection(collection).insert(
+ document=document,
+ return_new=return_new,
+ sync=sync,
+ silent=silent
+ )
+
+ def update_document(self,
+ document,
+ check_rev=True,
+ merge=True,
+ keep_none=True,
+ return_new=False,
+ return_old=False,
+ sync=None,
+ silent=False):
+ """Update a document.
+
+ :param document: Partial or full document with the updated values. It
+ must contain the "_id" field.
+ :type document: dict
+ :param check_rev: If set to True, revision of **document** (if given)
+ is compared against the revision of target document.
+ :type check_rev: bool
+ :param merge: If set to True, sub-dictionaries are merged instead of
+ the new one overwriting the old one.
+ :type merge: bool
+ :param keep_none: If set to True, fields with value None are retained
+ in the document. Otherwise, they are removed completely.
+ :type keep_none: bool
+ :param return_new: Include body of the new document in the result.
+ :type return_new: bool
+ :param return_old: Include body of the old document in the result.
+ :type return_old: bool
+ :param sync: Block until operation is synchronized to disk.
+ :type sync: bool
+ :param silent: If set to True, no document metadata is returned. This
+ can be used to save resources.
+ :type silent: bool
+ :return: Document metadata (e.g. document key, revision) or True if
+ parameter **silent** was set to True.
+ :rtype: bool | dict
+ :raise arango.exceptions.DocumentUpdateError: If update fails.
+ :raise arango.exceptions.DocumentRevisionError: If revisions mismatch.
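+
+ A usage sketch (the document ID and field are hypothetical):
+
+ .. code-block:: python
+
+     db.update_document({'_id': 'students/john', 'gpa': 3.8})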
+ """
+ return self._get_col_by_doc(document).update(
+ document=document,
+ check_rev=check_rev,
+ merge=merge,
+ keep_none=keep_none,
+ return_new=return_new,
+ return_old=return_old,
+ sync=sync,
+ silent=silent
+ )
+
+ def replace_document(self,
+ document,
+ check_rev=True,
+ return_new=False,
+ return_old=False,
+ sync=None,
+ silent=False):
+ """Replace a document.
+
+ :param document: New document to replace the old one with. It must
+ contain the "_id" field. Edge document must also have "_from" and
+ "_to" fields.
+ :type document: dict
+ :param check_rev: If set to True, revision of **document** (if given)
+ is compared against the revision of target document.
+ :type check_rev: bool
+ :param return_new: Include body of the new document in the result.
+ :type return_new: bool
+ :param return_old: Include body of the old document in the result.
+ :type return_old: bool
+ :param sync: Block until operation is synchronized to disk.
+ :type sync: bool
+ :param silent: If set to True, no document metadata is returned. This
+ can be used to save resources.
+ :type silent: bool
+ :return: Document metadata (e.g. document key, revision) or True if
+ parameter **silent** was set to True.
+ :rtype: bool | dict
+ :raise arango.exceptions.DocumentReplaceError: If replace fails.
+ :raise arango.exceptions.DocumentRevisionError: If revisions mismatch.
+ """
+ return self._get_col_by_doc(document).replace(
+ document=document,
+ check_rev=check_rev,
+ return_new=return_new,
+ return_old=return_old,
+ sync=sync,
+ silent=silent
+ )
+
+ def delete_document(self,
+ document,
+ rev=None,
+ check_rev=True,
+ ignore_missing=False,
+ return_old=False,
+ sync=None,
+ silent=False):
+ """Delete a document.
+
+ :param document: Document ID, key or body. Document body must contain
+ the "_id" field.
+ :type document: str | unicode | dict
+ :param rev: Expected document revision. Overrides the value of "_rev"
+ field in **document** if present.
+ :type rev: str | unicode
+ :param check_rev: If set to True, revision of **document** (if given)
+ is compared against the revision of target document.
+ :type check_rev: bool
+ :param ignore_missing: Do not raise an exception on missing document.
+ This parameter has no effect in transactions where an exception is
+ always raised on failures.
+ :type ignore_missing: bool
+ :param return_old: Include body of the old document in the result.
+ :type return_old: bool
+ :param sync: Block until operation is synchronized to disk.
+ :type sync: bool
+ :param silent: If set to True, no document metadata is returned. This
+ can be used to save resources.
+ :type silent: bool
+ :return: Document metadata (e.g. document key, revision), or True if
+ parameter **silent** was set to True, or False if document was not
+ found and **ignore_missing** was set to True (does not apply in
+ transactions).
+ :rtype: bool | dict
+ :raise arango.exceptions.DocumentDeleteError: If delete fails.
+ :raise arango.exceptions.DocumentRevisionError: If revisions mismatch.
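+
+ A usage sketch (the document ID is hypothetical):
+
+ .. code-block:: python
+
+     db.delete_document('students/john', ignore_missing=True)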
+ """
+ return self._get_col_by_doc(document).delete(
+ document=document,
+ rev=rev,
+ check_rev=check_rev,
+ ignore_missing=ignore_missing,
+ return_old=return_old,
+ sync=sync,
+ silent=silent
+ )
###################
# Task Management #
###################
def tasks(self):
- """Return all server tasks that are currently active.
+ """Return all currently active server tasks.
- :returns: the server tasks that are currently active
+ :return: Currently active server tasks.
:rtype: [dict]
- :raises arango.exceptions.TaskListError: if the list of active server
- tasks cannot be retrieved from the server
+ :raise arango.exceptions.TaskListError: If retrieval fails.
"""
- res = self._conn.get('/_api/tasks')
- if res.status_code not in HTTP_OK:
- raise TaskListError(res)
- return res.body
+ request = Request(
+ method='get',
+ endpoint='/_api/tasks'
+ )
+
+ def response_handler(resp):
+ if not resp.is_success:
+ raise TaskListError(resp, request)
+ return resp.body
+
+ return self._execute(request, response_handler)
def task(self, task_id):
- """Return the active server task with the given id.
+ """Return the details of an active server task.
- :param task_id: the ID of the server task
+ :param task_id: Server task ID.
:type task_id: str | unicode
- :returns: the details on the active server task
+ :return: Server task details.
:rtype: dict
- :raises arango.exceptions.TaskGetError: if the task cannot be retrieved
- from the server
+ :raise arango.exceptions.TaskGetError: If retrieval fails.
"""
- res = self._conn.get('/_api/tasks/{}'.format(task_id))
- if res.status_code not in HTTP_OK:
- raise TaskGetError(res)
- res.body.pop('code', None)
- res.body.pop('error', None)
- return res.body
+ request = Request(
+ method='get',
+ endpoint='/_api/tasks/{}'.format(task_id)
+ )
+
+ def response_handler(resp):
+ if not resp.is_success:
+ raise TaskGetError(resp, request)
+ resp.body.pop('code', None)
+ resp.body.pop('error', None)
+ return resp.body
+
+ return self._execute(request, response_handler)
- # TODO verify which arguments are optional
def create_task(self,
name,
command,
@@ -862,146 +1389,190 @@ def create_task(self,
task_id=None):
"""Create a new server task.
- :param name: the name of the server task
+ :param name: Name of the server task.
:type name: str | unicode
- :param command: the Javascript code to execute
+ :param command: Javascript command to execute.
:type command: str | unicode
- :param params: the parameters passed into the command
+ :param params: Optional parameters passed into the Javascript command.
:type params: dict
- :param period: the number of seconds to wait between executions (if
- set to 0, the new task will be ``"timed"``, which means it will
- execute only once and be deleted automatically afterwards
+ :param period: Number of seconds to wait between executions. If set
+ to 0, the new task will be "timed", meaning it will execute only
+ once and be deleted afterwards.
:type period: int
- :param offset: the initial delay before execution in seconds
+ :param offset: Initial delay before execution in seconds.
:type offset: int
- :param task_id: pre-defined ID for the new server task
+ :param task_id: Pre-defined ID for the new server task.
:type task_id: str | unicode
- :returns: the details on the new task
+ :return: Details of the new task.
:rtype: dict
- :raises arango.exceptions.TaskCreateError: if the task cannot be
- created on the server
+ :raise arango.exceptions.TaskCreateError: If create fails.
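+
+ A usage sketch (the task name and command are illustrative):
+
+ .. code-block:: python
+
+     new_task = db.create_task(
+         name='log_status',
+         command='require("console").log("status check");',
+         period=60,
+         offset=5
+     )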
"""
- data = {
- 'name': name,
- 'command': command,
- 'params': params if params else {},
- }
+ data = {'name': name, 'command': command}
+ if params is not None:
+ data['params'] = params
if task_id is not None:
data['id'] = task_id
if period is not None:
data['period'] = period
if offset is not None:
data['offset'] = offset
- res = self._conn.post(
- '/_api/tasks/{}'.format(task_id if task_id else ''),
+
+ if task_id is None:
+ task_id = ''
+
+ request = Request(
+ method='post',
+ endpoint='/_api/tasks/{}'.format(task_id),
data=data
)
- if res.status_code not in HTTP_OK:
- raise TaskCreateError(res)
- res.body.pop('code', None)
- res.body.pop('error', None)
- return res.body
+
+ def response_handler(resp):
+ if not resp.is_success:
+ raise TaskCreateError(resp, request)
+ resp.body.pop('code', None)
+ resp.body.pop('error', None)
+ return resp.body
+
+ return self._execute(request, response_handler)
def delete_task(self, task_id, ignore_missing=False):
- """Delete the server task specified by ID.
+ """Delete a server task.
- :param task_id: the ID of the server task
+ :param task_id: Server task ID.
:type task_id: str | unicode
- :param ignore_missing: ignore missing tasks
+ :param ignore_missing: Do not raise an exception on missing task.
:type ignore_missing: bool
- :returns: whether the deletion was successful
+ :return: True if task was successfully deleted, False if task was not
+ found and **ignore_missing** was set to True.
:rtype: bool
- :raises arango.exceptions.TaskDeleteError: when the task cannot be
- deleted from the server
+ :raise arango.exceptions.TaskDeleteError: If delete fails.
"""
- res = self._conn.delete('/_api/tasks/{}'.format(task_id))
- if res.status_code not in HTTP_OK:
- if not (res.status_code == 404 and ignore_missing):
- raise TaskDeleteError(res)
- return not res.body['error']
+ request = Request(
+ method='delete',
+ endpoint='/_api/tasks/{}'.format(task_id)
+ )
+
+ def response_handler(resp):
+ if resp.error_code == 1852 and ignore_missing:
+ return False
+ if not resp.is_success:
+ raise TaskDeleteError(resp, request)
+ return True
+
+ return self._execute(request, response_handler)
###################
# User Management #
###################
+ def has_user(self, username):
+ """Check if user exists.
+
+ :param username: Username.
+ :type username: str | unicode
+ :return: True if user exists, False otherwise.
+ :rtype: bool
+ """
+ return any(user['username'] == username for user in self.users())
+
def users(self):
- """Return the details of all users.
+ """Return all user details.
- :returns: the details of all users
+ :return: List of user details.
:rtype: [dict]
- :raises arango.exceptions.UserListError: if the retrieval fails
+ :raise arango.exceptions.UserListError: If retrieval fails.
"""
- res = self._conn.get('/_api/user')
- if res.status_code not in HTTP_OK:
- raise UserListError(res)
- return [{
- 'username': record['user'],
- 'active': record['active'],
- 'extra': record['extra'],
- } for record in res.body['result']]
+ request = Request(
+ method='get',
+ endpoint='/_api/user'
+ )
+
+ def response_handler(resp):
+ if not resp.is_success:
+ raise UserListError(resp, request)
+ return [{
+ 'username': record['user'],
+ 'active': record['active'],
+ 'extra': record['extra'],
+ } for record in resp.body['result']]
+
+ return self._execute(request, response_handler)
def user(self, username):
- """Return the details of a user.
+ """Return user details.
- :param username: the details of the user
+ :param username: Username.
:type username: str | unicode
- :returns: the user details
+ :return: User details.
:rtype: dict
- :raises arango.exceptions.UserGetError: if the retrieval fails
- """
- res = self._conn.get('/_api/user/{}'.format(username))
- if res.status_code not in HTTP_OK:
- raise UserGetError(res)
- return {
- 'username': res.body['user'],
- 'active': res.body['active'],
- 'extra': res.body['extra']
- }
+ :raise arango.exceptions.UserGetError: If retrieval fails.
+ """
+ request = Request(
+ method='get',
+ endpoint='/_api/user/{}'.format(username)
+ )
+
+ def response_handler(resp):
+ if not resp.is_success:
+ raise UserGetError(resp, request)
+ return {
+ 'username': resp.body['user'],
+ 'active': resp.body['active'],
+ 'extra': resp.body['extra']
+ }
- def create_user(self, username, password, active=None, extra=None):
+ return self._execute(request, response_handler)
+
+ def create_user(self, username, password, active=True, extra=None):
"""Create a new user.
- :param username: the name of the user
+ :param username: Username.
:type username: str | unicode
- :param password: the user's password
+ :param password: Password.
:type password: str | unicode
- :param active: whether the user is active
+ :param active: Whether the user is active.
:type active: bool
- :param extra: any extra data on the user
+ :param extra: Additional data for the user.
:type extra: dict
- :returns: the details of the new user
+ :return: New user details.
:rtype: dict
- :raises arango.exceptions.UserCreateError: if the user create fails
+ :raise arango.exceptions.UserCreateError: If create fails.
"""
- data = {'user': username, 'passwd': password}
- if active is not None:
- data['active'] = active
+ data = {'user': username, 'passwd': password, 'active': active}
if extra is not None:
data['extra'] = extra
- res = self._conn.post('/_api/user', data=data)
- if res.status_code not in HTTP_OK:
- raise UserCreateError(res)
- return {
- 'username': res.body['user'],
- 'active': res.body['active'],
- 'extra': res.body['extra'],
- }
+ request = Request(
+ method='post',
+ endpoint='/_api/user',
+ data=data
+ )
+
+ def response_handler(resp):
+ if not resp.is_success:
+ raise UserCreateError(resp, request)
+ return {
+ 'username': resp.body['user'],
+ 'active': resp.body['active'],
+ 'extra': resp.body['extra'],
+ }
+
+ return self._execute(request, response_handler)
def update_user(self, username, password=None, active=None, extra=None):
- """Update an existing user.
+ """Update a user.
- :param username: the name of the existing user
+ :param username: Username.
:type username: str | unicode
- :param password: the user's new password
+ :param password: New password.
:type password: str | unicode
- :param active: whether the user is active
+ :param active: Whether the user is active.
:type active: bool
- :param extra: any extra data on the user
+ :param extra: Additional data for the user.
:type extra: dict
- :returns: the details of the updated user
+ :return: New user details.
:rtype: dict
- :raises arango.exceptions.UserUpdateError: if the user update fails
+ :raise arango.exceptions.UserUpdateError: If update fails.
"""
data = {}
if password is not None:
@@ -1011,32 +1582,37 @@ def update_user(self, username, password=None, active=None, extra=None):
if extra is not None:
data['extra'] = extra
- res = self._conn.patch(
- '/_api/user/{user}'.format(user=username),
+ request = Request(
+ method='patch',
+ endpoint='/_api/user/{user}'.format(user=username),
data=data
)
- if res.status_code not in HTTP_OK:
- raise UserUpdateError(res)
- return {
- 'username': res.body['user'],
- 'active': res.body['active'],
- 'extra': res.body['extra'],
- }
+
+ def response_handler(resp):
+ if not resp.is_success:
+ raise UserUpdateError(resp, request)
+ return {
+ 'username': resp.body['user'],
+ 'active': resp.body['active'],
+ 'extra': resp.body['extra'],
+ }
+
+ return self._execute(request, response_handler)
def replace_user(self, username, password, active=None, extra=None):
- """Replace an existing user.
+ """Replace a user.
- :param username: the name of the existing user
+ :param username: Username.
:type username: str | unicode
- :param password: the user's new password
+ :param password: New password.
:type password: str | unicode
- :param active: whether the user is active
+ :param active: Whether the user is active.
:type active: bool
- :param extra: any extra data on the user
+ :param extra: Additional data for the user.
:type extra: dict
- :returns: the details of the replaced user
+ :return: New user details.
:rtype: dict
- :raises arango.exceptions.UserReplaceError: if the user replace fails
+ :raise arango.exceptions.UserReplaceError: If replace fails.
"""
data = {'user': username, 'passwd': password}
if active is not None:
@@ -1044,217 +1620,457 @@ def replace_user(self, username, password, active=None, extra=None):
if extra is not None:
data['extra'] = extra
- res = self._conn.put(
- '/_api/user/{user}'.format(user=username),
+ request = Request(
+ method='put',
+ endpoint='/_api/user/{user}'.format(user=username),
data=data
)
- if res.status_code not in HTTP_OK:
- raise UserReplaceError(res)
- return {
- 'username': res.body['user'],
- 'active': res.body['active'],
- 'extra': res.body['extra'],
- }
+
+ def response_handler(resp):
+ if resp.is_success:
+ return {
+ 'username': resp.body['user'],
+ 'active': resp.body['active'],
+ 'extra': resp.body['extra'],
+ }
+ raise UserReplaceError(resp, request)
+
+ return self._execute(request, response_handler)
def delete_user(self, username, ignore_missing=False):
- """Delete an existing user.
+ """Delete a user.
- :param username: the name of the existing user
+ :param username: Username.
:type username: str | unicode
- :param ignore_missing: ignore missing users
+ :param ignore_missing: Do not raise an exception on missing user.
:type ignore_missing: bool
- :returns: ``True`` if the operation was successful, ``False`` if the
- user was missing but **ignore_missing** was set to ``True``
+ :return: True if user was deleted successfully, False if user was not
+ found and **ignore_missing** was set to True.
:rtype: bool
- :raises arango.exceptions.UserDeleteError: if the user delete fails
+ :raise arango.exceptions.UserDeleteError: If delete fails.
"""
- res = self._conn.delete('/_api/user/{user}'.format(user=username))
- if res.status_code in HTTP_OK:
- return True
- elif res.status_code == 404 and ignore_missing:
- return False
- raise UserDeleteError(res)
+ request = Request(
+ method='delete',
+ endpoint='/_api/user/{user}'.format(user=username)
+ )
+
+ def response_handler(resp):
+ if resp.is_success:
+ return True
+ elif resp.status_code == 404 and ignore_missing:
+ return False
+ raise UserDeleteError(resp, request)
- def user_access(self, username):
- """Return a user's access details for the database.
+ return self._execute(request, response_handler)
+
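A minimal usage sketch of the new user management methods; the "johndoe" user, its "secret" password, and the root/"passwd" credentials are illustrative, mirroring the test setup:

.. code-block:: python

    from arango import ArangoClient

    client = ArangoClient(protocol='http', host='localhost', port=8529)
    sys_db = client.db('_system', username='root', password='passwd')

    # Create the user only if it does not exist yet.
    if not sys_db.has_user('johndoe'):
        sys_db.create_user(
            username='johndoe',
            password='secret',
            active=True,
            extra={'team': 'backend'}
        )
    print(sys_db.user('johndoe'))

    # A repeated delete returns False instead of raising
    # UserDeleteError when ignore_missing is set.
    assert sys_db.delete_user('johndoe') is True
    assert sys_db.delete_user('johndoe', ignore_missing=True) is False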
+ #########################
+ # Permission Management #
+ #########################
- Appropriate permissions are required in order to execute this method.
+ def permissions(self, username):
+ """Return user permissions for all databases and collections.
- :param username: The name of the user.
+ :param username: Username.
:type username: str | unicode
- :returns: The access details (e.g. ``"rw"``, ``None``)
- :rtype: str | unicode | None
- :raises: arango.exceptions.UserAccessError: If the retrieval fails.
+ :return: User permissions for all databases and collections.
+ :rtype: dict
+ :raise arango.exceptions.PermissionListError: If retrieval fails.
"""
- res = self._conn.get(
- '/_api/user/{}/database/{}'.format(username, self.name),
+ request = Request(
+ method='get',
+ endpoint='/_api/user/{}/database'.format(username),
+ params={'full': True}
)
- if res.status_code in HTTP_OK:
- result = res.body['result'].lower()
- return None if result == 'none' else result
- raise UserAccessError(res)
- def grant_user_access(self, username, database=None):
- """Grant user access to the database.
+ def response_handler(resp):
+ if resp.is_success:
+ return resp.body['result']
+ raise PermissionListError(resp, request)
+
+ return self._execute(request, response_handler)
- Appropriate permissions are required in order to execute this method.
+ def permission(self, username, database, collection=None):
+ """Return user permission for a specific database or collection.
- :param username: The name of the user.
+ :param username: Username.
:type username: str | unicode
- :param database: The name of the database. If a name is not specified,
- the name of the current database is used.
+ :param database: Database name.
:type database: str | unicode
- :returns: Whether the operation was successful or not.
+ :param collection: Collection name.
+ :type collection: str | unicode
+ :return: Permission for given database or collection.
+ :rtype: str | unicode
+ :raise arango.exceptions.PermissionGetError: If retrieval fails.
+ """
+ endpoint = '/_api/user/{}/database/{}'.format(username, database)
+ if collection is not None:
+ endpoint += '/' + collection
+ request = Request(method='get', endpoint=endpoint)
+
+ def response_handler(resp):
+ if not resp.is_success:
+ raise PermissionGetError(resp, request)
+ return resp.body['result']
+
+ return self._execute(request, response_handler)
+
+ def update_permission(self,
+ username,
+ permission,
+ database,
+ collection=None):
+ """Update user permission for a specific database or collection.
+
+ :param username: Username.
+ :type username: str | unicode
+ :param permission: Allowed values are "rw" (read and write), "ro"
+ (read only) or "none" (no access).
+ :type permission: str | unicode
+ :param database: Database name.
+ :type database: str | unicode
+ :param collection: Collection name.
+ :type collection: str | unicode
+ :return: True if permission was updated successfully.
:rtype: bool
- :raises arango.exceptions.UserGrantAccessError: If the operation fails.
+ :raise arango.exceptions.PermissionUpdateError: If update fails.
"""
- if database is None:
- database = self.name
-
- res = self._conn.put(
- '/_api/user/{}/database/{}'.format(username, database),
- data={'grant': 'rw'}
+ endpoint = '/_api/user/{}/database/{}'.format(username, database)
+ if collection is not None:
+ endpoint += '/' + collection
+
+ request = Request(
+ method='put',
+ endpoint=endpoint,
+ data={'grant': permission}
)
- if res.status_code in HTTP_OK:
- return True
- raise UserGrantAccessError(res)
- def revoke_user_access(self, username, database=None):
- """Revoke user access to the database.
+ def response_handler(resp):
+ if resp.is_success:
+ return True
+ raise PermissionUpdateError(resp, request)
+
+ return self._execute(request, response_handler)
- Appropriate permissions are required in order to execute this method.
+ def reset_permission(self, username, database, collection=None):
+ """Reset user permission for a specific database or collection.
- :param username: The name of the user.
+ :param username: Username.
:type username: str | unicode
- :param database: The name of the database. If a name is not specified,
- the name of the current database is used.
- :type database: str | unicode | unicode
- :returns: Whether the operation was successful or not.
+ :param database: Database name.
+ :type database: str | unicode
+ :param collection: Collection name.
+ :type collection: str | unicode
+ :return: True if permission was reset successfully.
:rtype: bool
- :raises arango.exceptions.UserRevokeAccessError: If the operation fails.
+ :raise arango.exceptions.PermissionResetError: If reset fails.
"""
- if database is None:
- database = self.name
+ endpoint = '/_api/user/{}/database/{}'.format(username, database)
+ if collection is not None:
+ endpoint += '/' + collection
- res = self._conn.delete(
- '/_api/user/{}/database/{}'.format(username, database)
- )
- if res.status_code in HTTP_OK:
- return True
- raise UserRevokeAccessError(res)
+ request = Request(method='delete', endpoint=endpoint)
+
+ def response_handler(resp):
+ if resp.is_success:
+ return True
+ raise PermissionResetError(resp, request)
+
+ return self._execute(request, response_handler)
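The permission methods compose in the expected way; a sketch assuming a "test" database and a "students" collection already exist:

.. code-block:: python

    from arango import ArangoClient

    client = ArangoClient(protocol='http', host='localhost', port=8529)
    sys_db = client.db('_system', username='root', password='passwd')

    # Database-level permission plus a finer-grained collection override.
    sys_db.update_permission('johndoe', permission='ro', database='test')
    sys_db.update_permission(
        'johndoe', permission='rw', database='test', collection='students'
    )
    assert sys_db.permission('johndoe', 'test') == 'ro'
    assert sys_db.permission('johndoe', 'test', 'students') == 'rw'

    # Remove the override so the database-level permission applies again.
    sys_db.reset_permission('johndoe', 'test', 'students')

    # Full permission map across databases and collections.
    print(sys_db.permissions('johndoe'))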
########################
# Async Job Management #
########################
def async_jobs(self, status, count=None):
- """Return the IDs of asynchronous jobs with the specified status.
+ """Return IDs of async jobs with given status.
- :param status: The job status (``"pending"`` or ``"done"``).
+ :param status: Job status (e.g. "pending", "done").
:type status: str | unicode
- :param count: The maximum number of job IDs to return.
+ :param count: Max number of job IDs to return.
:type count: int
- :returns: The list of job IDs.
- :rtype: [str]
- :raises arango.exceptions.AsyncJobListError: If the retrieval fails.
+ :return: List of job IDs.
+ :rtype: [str | unicode]
+ :raise arango.exceptions.AsyncJobListError: If retrieval fails.
"""
- res = self._conn.get(
- '/_api/job/{}'.format(status),
- params={} if count is None else {'count': count}
+ params = {}
+ if count is not None:
+ params['count'] = count
+
+ request = Request(
+ method='get',
+ endpoint='/_api/job/{}'.format(status),
+ params=params
)
- if res.status_code not in HTTP_OK:
- raise AsyncJobListError(res)
- return res.body
+
+ def response_handler(resp):
+ if resp.is_success:
+ return resp.body
+ raise AsyncJobListError(resp, request)
+
+ return self._execute(request, response_handler)
def clear_async_jobs(self, threshold=None):
- """Delete asynchronous job results from the server.
+ """Clear async job results from the server.
+
+ Async jobs that are still queued or running are not stopped.
:param threshold: If specified, only the job results created prior to
- the threshold (a unix timestamp) are deleted, otherwise *all* job
+ the threshold (a unix timestamp) are deleted. Otherwise, all job
results are deleted.
:type threshold: int
- :returns: Whether the deletion of results was successful.
+ :return: True if job results were cleared successfully.
:rtype: bool
- :raises arango.exceptions.AsyncJobClearError: If the operation fails.
-
- .. note::
- Async jobs currently queued or running are not stopped.
+ :raise arango.exceptions.AsyncJobClearError: If operation fails.
"""
if threshold is None:
- res = self._conn.delete('/_api/job/all')
+ url = '/_api/job/all'
+ params = None
else:
- res = self._conn.delete(
- '/_api/job/expired',
- params={'stamp': threshold}
- )
- if res.status_code in HTTP_OK:
- return True
- raise AsyncJobClearError(res)
+ url = '/_api/job/expired'
+ params = {'stamp': threshold}
+
+ request = Request(
+ method='delete',
+ endpoint=url,
+ params=params
+ )
- ###############
- # Pregel Jobs #
- ###############
+ def response_handler(resp):
+ if resp.is_success:
+ return True
+ raise AsyncJobClearError(resp, request)
- def create_pregel_job(self, algorithm, graph):
- """Start/create a Pregel job.
+ return self._execute(request, response_handler)
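A short sketch of the job management calls, with the threshold semantics documented above:

.. code-block:: python

    import time

    from arango import ArangoClient

    client = ArangoClient(protocol='http', host='localhost', port=8529)
    db = client.db('test', username='root', password='passwd')

    # Inspect queued and completed job IDs, capping the result size.
    print(db.async_jobs(status='pending'))
    print(db.async_jobs(status='done', count=100))

    # Clear results older than one hour; omit the threshold to clear all.
    db.clear_async_jobs(threshold=int(time.time()) - 3600)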
- :param algorithm: The name of the algorithm (e.g. ``"pagerank"``).
- :type algorithm: str | unicode
- :param graph: The name of the graph.
- :type graph: str | unicode
- :returns: The ID of the Pregel job.
- :rtype: int
- :raises arango.exceptions.PregelJobCreateError: If the operation fails.
- """
- res = self._conn.post(
- '/_api/control_pregel',
- data={
- 'algorithm': algorithm,
- 'graphName': graph,
- }
+class StandardDatabase(Database):
+ """Standard database API wrapper.
+
+ :param connection: HTTP connection.
+ :type connection: arango.connection.Connection
+ """
+
+ def __init__(self, connection):
+ super(StandardDatabase, self).__init__(
+ connection=connection,
+ executor=DefaultExecutor(connection)
)
- if res.status_code in HTTP_OK:
- return res.body
- raise PregelJobCreateError(res)
- def pregel_job(self, job_id):
- """Return the details of a Pregel job.
+ def __repr__(self):
+ return '<StandardDatabase {}>'.format(self.name)
+
+ def begin_async_execution(self, return_result=True):
+ """Begin async execution.
- :param job_id: The Pregel job ID.
- :type job_id: int
- :returns: The details of the Pregel job.
- :rtype: dict
- :raises arango.exceptions.PregelJobGetError: If the lookup fails.
+ :param return_result: If set to True, API executions return instances
+ of :class:`arango.job.AsyncJob`, which you can use to retrieve
+ results from server once available. If set to False, API executions
+ return None and no results are stored on server.
+ :type return_result: bool
+ :return: Database API wrapper built specifically for async execution.
+ :rtype: arango.database.AsyncDatabase
"""
- res = self._conn.get(
- '/_api/control_pregel/{}'.format(job_id)
+ return AsyncDatabase(self._conn, return_result)
+
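A sketch of the async workflow; the job accessors (``status()``, ``result()``) are assumed from ``arango.job.AsyncJob``, which is outside this hunk, and the "students" collection is assumed to exist. With ``return_result=False`` the same call is fire-and-forget and returns None.

.. code-block:: python

    import time

    from arango import ArangoClient

    client = ArangoClient(protocol='http', host='localhost', port=8529)
    db = client.db('test', username='root', password='passwd')

    # Each API call now returns an AsyncJob handle instead of a result.
    async_db = db.begin_async_execution(return_result=True)
    job = async_db.collection('students').insert({'_key': 'jane'})

    while job.status() == 'pending':  # poll until the server is done
        time.sleep(0.1)
    print(job.result())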
+ def begin_batch_execution(self, return_result=True):
+ """Begin batch execution.
+
+ :param return_result: If set to True, API executions return instances
+ of :class:`arango.job.BatchJob` that are populated with results on
+ commit. If set to False, API executions return None and no results
+ are tracked client-side.
+ :type return_result: bool
+ :return: Database API wrapper built specifically for batch execution.
+ :rtype: arango.database.BatchDatabase
+ """
+ return BatchDatabase(self._conn, return_result)
+
+ def begin_transaction(self,
+ return_result=True,
+ timeout=None,
+ sync=None,
+ read=None,
+ write=None):
+ """Begin transaction.
+
+ :param return_result: If set to True, API executions return instances
+ of :class:`arango.job.TransactionJob` that are populated with
+ results on commit. If set to False, API executions return None and
+ no results are tracked client-side.
+ :type return_result: bool
+ :param read: Names of collections read during transaction. If not
+ specified, they are added automatically as jobs are queued.
+ :type read: [str | unicode]
+ :param write: Names of collections written to during transaction.
+ If not specified, they are added automatically as jobs are queued.
+ :type write: [str | unicode]
+ :param timeout: Timeout for waiting on collection locks. If set to 0,
+ ArangoDB server waits indefinitely. If not set, system default
+ value is used.
+ :type timeout: int
+ :param sync: Block until the transaction is synchronized to disk.
+ :type sync: bool
+ :return: Database API wrapper built specifically for transactions.
+ :rtype: arango.database.TransactionDatabase
+ """
+ return TransactionDatabase(
+ connection=self._conn,
+ return_result=return_result,
+ read=read,
+ write=write,
+ timeout=timeout,
+ sync=sync
)
- if res.status_code in HTTP_OK:
- return {
- 'aggregators': res.body['aggregators'],
- 'edge_count': res.body.get('edgeCount'),
- 'gss': res.body['gss'],
- 'received_count': res.body['receivedCount'],
- 'send_count': res.body['sendCount'],
- 'state': res.body['state'],
- 'total_runtime': res.body['totalRuntime'],
- 'vertex_count': res.body.get('vertexCount')
- }
- raise PregelJobGetError(res)
- def delete_pregel_job(self, job_id):
- """Cancel/delete a Pregel job.
- :param job_id: The Pregel job ID.
- :type job_id: int
- :returns: ``True`` if the Pregel job was successfully cancelled.
- :rtype: bool
- :raises arango.exceptions.PregelJobDeleteError: If the deletion fails.
+class AsyncDatabase(Database):
+ """Database API wrapper tailored specifically for async execution.
+
+ See :func:`arango.database.StandardDatabase.begin_async_execution`.
+
+ :param connection: HTTP connection.
+ :type connection: arango.connection.Connection
+ :param return_result: If set to True, API executions return instances of
+ :class:`arango.job.AsyncJob`, which you can use to retrieve results
+ from server once available. If set to False, API executions return None
+ and no results are stored on server.
+ :type return_result: bool
+ """
+
+ def __init__(self, connection, return_result):
+ super(AsyncDatabase, self).__init__(
+ connection=connection,
+ executor=AsyncExecutor(connection, return_result)
+ )
+
+ def __repr__(self):
+ return '<AsyncDatabase {}>'.format(self.name)
+
+
+class BatchDatabase(Database):
+ """Database API wrapper tailored specifically for batch execution.
+
+ See :func:`arango.database.StandardDatabase.begin_batch_execution`.
+
+ :param connection: HTTP connection.
+ :type connection: arango.connection.Connection
+ :param return_result: If set to True, API executions return instances of
+ :class:`arango.job.BatchJob` that are populated with results on commit.
+ If set to False, API executions return None and no results are tracked
+ client-side.
+ :type return_result: bool
+ """
+
+ def __init__(self, connection, return_result):
+ super(BatchDatabase, self).__init__(
+ connection=connection,
+ executor=BatchExecutor(connection, return_result)
+ )
+
+ def __repr__(self):
+ return '<BatchDatabase {}>'.format(self.name)
+
+ def __enter__(self):
+ return self
+
+ def __exit__(self, exception, *_):
+ if exception is None:
+ self._executor.commit()
+
+ def queued_jobs(self):
+ """Return the queued batch jobs.
+
+ :return: Queued batch jobs or None if **return_result** parameter was
+ set to False during initialization.
+ :rtype: [arango.job.BatchJob] | None
"""
- res = self._conn.delete(
- '/_api/control_pregel/{}'.format(job_id)
+ return self._executor.jobs
+
+ def commit(self):
+ """Execute the queued requests in a single batch API request.
+
+ If **return_result** parameter was set to True during initialization,
+ :class:`arango.job.BatchJob` instances are populated with results.
+
+ :return: Batch jobs, or None if **return_result** parameter was set to
+ False during initialization.
+ :rtype: [arango.job.BatchJob] | None
+ :raise arango.exceptions.BatchStateError: If batch state is invalid
+ (e.g. batch was already committed or the response size did not
+ match expected).
+ :raise arango.exceptions.BatchExecuteError: If commit fails.
+ """
+ return self._executor.commit()
+
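Because ``__exit__`` commits on a clean exit, batch execution reads naturally as a context manager; ``BatchJob.result()`` is assumed from arango/job.py, outside this hunk:

.. code-block:: python

    from arango import ArangoClient

    client = ArangoClient(protocol='http', host='localhost', port=8529)
    db = client.db('test', username='root', password='passwd')

    # Queued calls return BatchJob handles; leaving the block commits
    # them all in a single batch API request.
    with db.begin_batch_execution(return_result=True) as batch_db:
        students = batch_db.collection('students')
        job1 = students.insert({'_key': 'abe'})
        job2 = students.insert({'_key': 'liz'})

    print(job1.result())
    print(job2.result())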
+
+class TransactionDatabase(Database):
+ """Database API wrapper tailored specifically for transactions.
+
+ See :func:`arango.database.StandardDatabase.begin_transaction`.
+
+ :param connection: HTTP connection.
+ :type connection: arango.connection.Connection
+ :param return_result: If set to True, API executions return instances of
+ :class:`arango.job.TransactionJob` that are populated with results on
+ commit. If set to False, API executions return None and no results are
+ tracked client-side.
+ :type return_result: bool
+ :param read: Names of collections read during transaction.
+ :type read: [str | unicode]
+ :param write: Names of collections written to during transaction.
+ :type write: [str | unicode]
+ :param timeout: Timeout for waiting on collection locks. If set to 0, the
+ ArangoDB server waits indefinitely. If not set, system default value
+ is used.
+ :type timeout: int
+ :param sync: Block until operation is synchronized to disk.
+ :type sync: bool
+ """
+
+ def __init__(self, connection, return_result, read, write, timeout, sync):
+ super(TransactionDatabase, self).__init__(
+ connection=connection,
+ executor=TransactionExecutor(
+ connection=connection,
+ return_result=return_result,
+ read=read,
+ write=write,
+ timeout=timeout,
+ sync=sync
+ )
)
- if res.status_code in HTTP_OK:
- return True
- raise PregelJobDeleteError(res)
+
+ def __repr__(self):
+ return '<TransactionDatabase {}>'.format(self.name)
+
+ def __enter__(self):
+ return self
+
+ def __exit__(self, exception, *_):
+ if exception is None:
+ self._executor.commit()
+
+ def queued_jobs(self):
+ """Return the queued transaction jobs.
+
+ :return: Queued transaction jobs, or None if **return_result** was set
+ to False during initialization.
+ :rtype: [arango.job.TransactionJob] | None
+ """
+ return self._executor.jobs
+
+ def commit(self):
+ """Execute the queued requests in a single transaction API request.
+
+ If **return_result** parameter was set to True during initialization,
+ :class:`arango.job.TransactionJob` instances are populated with
+ results.
+
+ :return: Transaction jobs, or None if **return_result** parameter was
+ set to False during initialization.
+ :rtype: [arango.job.TransactionJob] | None
+ :raise arango.exceptions.TransactionStateError: If the transaction was
+ already committed.
+ :raise arango.exceptions.TransactionExecuteError: If commit fails.
+ """
+ return self._executor.commit()
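Transactions follow the same pattern; an explicit ``commit()`` is shown here, and ``TransactionJob.result()`` is again assumed from arango/job.py:

.. code-block:: python

    from arango import ArangoClient

    client = ArangoClient(protocol='http', host='localhost', port=8529)
    db = client.db('test', username='root', password='passwd')

    # Declare write collections up front; jobs queue until commit().
    txn_db = db.begin_transaction(write=['students'], timeout=5, sync=True)
    txn_col = txn_db.collection('students')
    job1 = txn_col.insert({'_key': 'kate'})
    job2 = txn_col.insert({'_key': 'greg'})
    txn_db.commit()

    print(job1.result())
    print(job2.result())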
diff --git a/arango/exceptions.py b/arango/exceptions.py
index 74b6a9a0..98ccd456 100644
--- a/arango/exceptions.py
+++ b/arango/exceptions.py
@@ -1,174 +1,269 @@
from __future__ import absolute_import, unicode_literals
-from six import string_types as string
-from arango.response import Response
+class ArangoError(Exception):
+ """Base class for all exceptions in python-arango."""
-class ArangoError(Exception):
- """Base class for all ArangoDB exceptions.
+class ArangoClientError(ArangoError):
+ """Base class for errors originating from python-arango client.
+
+ :param msg: Error message.
+ :type msg: str | unicode
- :param data: the response object or string
- :type data: arango.response.Response | str | unicode
+ :cvar source: Source of the error (always set to "client").
+ :vartype source: str | unicode
+ :ivar message: Error message.
+ :vartype message: str | unicode
"""
+ source = 'client'
+
+ def __init__(self, msg):
+ super(ArangoClientError, self).__init__(msg)
+ self.message = msg
+ self.error_message = None
+ self.error_code = None
+ self.url = None
+ self.response = None
+ self.request = None
+ self.http_method = None
+ self.http_code = None
+ self.http_headers = None
+
+
+class ArangoServerError(ArangoError):
+ """Base class for errors originating from ArangoDB server.
+
+ :param resp: HTTP response.
+ :type resp: arango.response.Response
+ :param request: HTTP request.
+ :type request: arango.request.Request
+ :param msg: Error message override.
+ :type msg: str | unicode
+
+ :cvar source: Source of the error (always set to "server").
+ :vartype source: str | unicode
+ :ivar message: Exception message.
+ :vartype message: str | unicode
+ :ivar url: API URL.
+ :vartype url: str | unicode
+ :ivar response: HTTP response object.
+ :vartype response: arango.response.Response
+ :ivar request: HTTP request object.
+ :vartype request: arango.request.Request
+ :ivar http_method: HTTP method in lowercase (e.g. "post").
+ :vartype http_method: str | unicode
+ :ivar http_code: HTTP status code.
+ :vartype http_code: int
+ :ivar http_headers: Response headers.
+ :vartype http_headers: requests.structures.CaseInsensitiveDict | dict
+ :ivar error_code: Error code from ArangoDB server.
+ :vartype error_code: int
+ :ivar error_message: Raw error message from ArangoDB server.
+ :vartype error_message: str | unicode
+ """
+ source = 'server'
+
+ def __init__(self, resp, request, msg=None):
+ msg = msg or resp.error_message or resp.status_text
+ self.error_message = resp.error_message
+ self.error_code = resp.error_code
+ if self.error_code is not None:
+ msg = '[HTTP {}][ERR {}] {}'.format(
+ resp.status_code, self.error_code, msg)
+ else:
+ msg = '[HTTP {}] {}'.format(resp.status_code, msg)
+ super(ArangoServerError, self).__init__(msg)
+ self.message = msg
+ self.url = resp.url
+ self.response = resp
+ self.request = request
+ self.http_method = resp.method
+ self.http_code = resp.status_code
+ self.http_headers = resp.headers
- def __init__(self, data, message=None):
- if isinstance(data, Response):
- # Get the ArangoDB error message if provided
- if message is not None:
- error_message = message
- elif data.error_message is not None:
- error_message = data.error_message
- elif data.status_text is not None:
- error_message = data.status_text
- else: # pragma: no cover
- error_message = "request failed"
-
- # Get the ArangoDB error number if provided
- self.error_code = data.error_code
-
- # Build the error message for the exception
- if self.error_code is None:
- error_message = '[HTTP {}] {}'.format(
- data.status_code,
- error_message
- )
- else:
- error_message = '[HTTP {}][ERR {}] {}'.format(
- data.status_code,
- self.error_code,
- error_message
- )
- # Generate the error message for the exception
- super(ArangoError, self).__init__(error_message)
- self.message = error_message
- self.http_method = data.method
- self.url = data.url
- self.http_code = data.status_code
- self.http_headers = data.headers
- elif isinstance(data, string):
- super(ArangoError, self).__init__(data)
- self.message = data
- self.error_code = None
- self.url = None
- self.http_method = None
- self.http_code = None
- self.http_headers = None
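Both subclasses expose the same attribute set, so callers can branch on ``source`` or on the server error number; a sketch assuming a duplicate-key insert into an existing "students" collection:

.. code-block:: python

    from arango import ArangoClient
    from arango.exceptions import DocumentInsertError

    client = ArangoClient(protocol='http', host='localhost', port=8529)
    db = client.db('test', username='root', password='passwd')

    try:
        db.collection('students').insert({'_key': 'abe'})  # already exists
    except DocumentInsertError as err:
        assert err.source == 'server'
        print(err.http_code)    # e.g. 409
        print(err.error_code)   # e.g. 1210 (unique constraint violated)
        print(err.message)      # '[HTTP 409][ERR 1210] ...'
        print(err.url)          # full URL of the failed API call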
+##################
+# AQL Exceptions #
+##################
+
+
+class AQLQueryListError(ArangoServerError):
+ """Failed to retrieve running AQL queries."""
+
+
+class AQLQueryExplainError(ArangoServerError):
+ """Failed to parse and explain query."""
-#####################
-# Server Exceptions #
-#####################
+class AQLQueryValidateError(ArangoServerError):
+ """Failed to parse and validate query."""
-class ServerConnectionError(ArangoError):
- """Failed to connect to the ArangoDB instance."""
+class AQLQueryExecuteError(ArangoServerError):
+ """Failed to execute query."""
-class ServerEndpointsError(ArangoError):
- """Failed to retrieve the ArangoDB server endpoints."""
+class AQLQueryKillError(ArangoServerError):
+ """Failed to kill the query."""
-class ServerVersionError(ArangoError):
- """Failed to retrieve the ArangoDB server version."""
+class AQLQueryClearError(ArangoServerError):
+ """Failed to clear slow AQL queries."""
-class ServerDetailsError(ArangoError):
- """Failed to retrieve the ArangoDB server details."""
+class AQLQueryTrackingGetError(ArangoServerError):
+ """Failed to retrieve AQL tracking properties."""
-class ServerTimeError(ArangoError):
- """Failed to return the current ArangoDB system time."""
+class AQLQueryTrackingSetError(ArangoServerError):
+ """Failed to configure AQL tracking properties."""
-class ServerEchoError(ArangoError):
- """Failed to return the last request."""
+class AQLCachePropertiesError(ArangoServerError):
+ """Failed to retrieve query cache properties."""
-class ServerSleepError(ArangoError):
- """Failed to suspend the ArangoDB server."""
+class AQLCacheConfigureError(ArangoServerError):
+ """Failed to configure query cache properties."""
-class ServerShutdownError(ArangoError):
- """Failed to initiate a clean shutdown sequence."""
+class AQLCacheClearError(ArangoServerError):
+ """Failed to clear the query cache."""
-class ServerRunTestsError(ArangoError):
- """Failed to execute the specified tests on the server."""
+class AQLFunctionListError(ArangoServerError):
+ """Failed to retrieve AQL user functions."""
-class ServerExecuteError(ArangoError):
- """Failed to execute a the given Javascript program on the server."""
+class AQLFunctionCreateError(ArangoServerError):
+ """Failed to create AQL user function."""
-class ServerRequiredDBVersionError(ArangoError):
- """Failed to retrieve the required database version."""
+
+class AQLFunctionDeleteError(ArangoServerError):
+ """Failed to delete AQL user function."""
+
+
+##############################
+# Async Execution Exceptions #
+##############################
-class ServerReadLogError(ArangoError):
- """Failed to retrieve the global log."""
+class AsyncExecuteError(ArangoServerError):
+ """Failed to execute async API request."""
-class ServerLogLevelError(ArangoError):
- """Failed to return the log level."""
+class AsyncJobListError(ArangoServerError):
+ """Failed to retrieve async jobs."""
-class ServerLogLevelSetError(ArangoError):
- """Failed to set the log level."""
+class AsyncJobCancelError(ArangoServerError):
+ """Failed to cancel async job."""
-class ServerReloadRoutingError(ArangoError):
- """Failed to reload the routing information."""
+class AsyncJobStatusError(ArangoServerError):
+ """Failed to retrieve async job status."""
-class ServerStatisticsError(ArangoError):
- """Failed to retrieve the server statistics."""
+class AsyncJobResultError(ArangoServerError):
+ """Failed to retrieve async job result."""
-class ServerRoleError(ArangoError):
- """Failed to retrieve the role of the server in a cluster."""
+class AsyncJobClearError(ArangoServerError):
+ """Failed to clear async job results."""
##############################
-# Write-Ahead Log Exceptions #
+# Batch Execution Exceptions #
##############################
-class WALPropertiesError(ArangoError):
- """Failed to retrieve the write-ahead log."""
+class BatchStateError(ArangoClientError):
+ """The batch object was in a bad state."""
-class WALConfigureError(ArangoError):
- """Failed to configure the write-ahead log."""
+class BatchJobResultError(ArangoClientError):
+ """Failed to retrieve batch job result."""
-class WALTransactionListError(ArangoError):
- """Failed to retrieve the list of running transactions."""
+class BatchExecuteError(ArangoServerError):
+ """Failed to execute batch API request."""
-class WALFlushError(ArangoError):
- """Failed to flush the write-ahead log."""
+#########################
+# Collection Exceptions #
+#########################
-###################
-# Task Exceptions #
-###################
+class CollectionListError(ArangoServerError):
+ """Failed to retrieve collections."""
+
+
+class CollectionPropertiesError(ArangoServerError):
+ """Failed to retrieve collection properties."""
+
+
+class CollectionConfigureError(ArangoServerError):
+ """Failed to configure collection properties."""
+
+
+class CollectionStatisticsError(ArangoServerError):
+ """Failed to retrieve collection statistics."""
+
+
+class CollectionRevisionError(ArangoServerError):
+ """Failed to retrieve collection revision."""
+
+
+class CollectionChecksumError(ArangoServerError):
+ """Failed to retrieve collection checksum."""
+
+
+class CollectionCreateError(ArangoServerError):
+ """Failed to create collection."""
-class TaskListError(ArangoError):
- """Failed to list the active server tasks."""
+class CollectionDeleteError(ArangoServerError):
+ """Failed to delete collection."""
-class TaskGetError(ArangoError):
- """Failed to retrieve the active server task."""
+class CollectionRenameError(ArangoServerError):
+ """Failed to rename collection."""
-class TaskCreateError(ArangoError):
- """Failed to create a server task."""
+class CollectionTruncateError(ArangoServerError):
+ """Failed to truncate collection."""
-class TaskDeleteError(ArangoError):
- """Failed to delete a server task."""
+class CollectionLoadError(ArangoServerError):
+ """Failed to load collection."""
+
+
+class CollectionUnloadError(ArangoServerError):
+ """Failed to unload collection."""
+
+
+class CollectionRotateJournalError(ArangoServerError):
+ """Failed to rotate collection journal."""
+
+
+#####################
+# Cursor Exceptions #
+#####################
+
+
+class CursorStateError(ArangoClientError):
+ """The cursor object was in a bad state."""
+
+
+class CursorEmptyError(ArangoClientError):
+ """The current batch in cursor was empty."""
+
+
+class CursorNextError(ArangoServerError):
+ """Failed to retrieve the next result batch from server."""
+
+
+class CursorCloseError(ArangoServerError):
+ """Failed to delete the cursor result from server."""
#######################
@@ -176,359 +271,428 @@ class TaskDeleteError(ArangoError):
#######################
-class DatabaseListError(ArangoError):
- """Failed to retrieve the list of databases."""
+class DatabaseListError(ArangoServerError):
+ """Failed to retrieve databases."""
-class DatabasePropertiesError(ArangoError):
- """Failed to retrieve the database options."""
+class DatabasePropertiesError(ArangoServerError):
+ """Failed to retrieve database properties."""
-class DatabaseCreateError(ArangoError):
- """Failed to create the database."""
+class DatabaseCreateError(ArangoServerError):
+ """Failed to create database."""
-class DatabaseDeleteError(ArangoError):
- """Failed to delete the database."""
+class DatabaseDeleteError(ArangoServerError):
+ """Failed to delete database."""
-###################
-# User Exceptions #
-###################
+#######################
+# Document Exceptions #
+#######################
-class UserListError(ArangoError):
- """Failed to retrieve the users."""
+class DocumentParseError(ArangoClientError):
+ """Failed to parse document input."""
-class UserGetError(ArangoError):
- """Failed to retrieve the user."""
+class DocumentCountError(ArangoServerError):
+ """Failed to retrieve document count."""
-class UserCreateError(ArangoError):
- """Failed to create the user."""
+class DocumentInError(ArangoServerError):
+ """Failed to check whether document exists."""
-class UserUpdateError(ArangoError):
- """Failed to update the user."""
+class DocumentGetError(ArangoServerError):
+ """Failed to retrieve document."""
-class UserReplaceError(ArangoError):
- """Failed to replace the user."""
+class DocumentKeysError(ArangoServerError):
+ """Failed to retrieve document keys."""
-class UserDeleteError(ArangoError):
- """Failed to delete the user."""
+class DocumentIDsError(ArangoServerError):
+ """Failed to retrieve document IDs."""
-class UserAccessError(ArangoError):
- """Failed to retrieve the names of databases user can access."""
+class DocumentInsertError(ArangoServerError):
+ """Failed to insert document."""
-class UserGrantAccessError(ArangoError):
- """Failed to grant user access to a database."""
+class DocumentReplaceError(ArangoServerError):
+ """Failed to replace document."""
-class UserRevokeAccessError(ArangoError):
- """Failed to revoke user access to a database."""
+class DocumentUpdateError(ArangoServerError):
+ """Failed to update document."""
-#########################
-# Collection Exceptions #
-#########################
+class DocumentDeleteError(ArangoServerError):
+ """Failed to delete document."""
-class CollectionListError(ArangoError):
- """Failed to retrieve the list of collections."""
+class DocumentRevisionError(ArangoServerError):
+ """The expected and actual document revisions mismatched."""
-class CollectionPropertiesError(ArangoError):
- """Failed to retrieve the collection properties."""
+###################
+# Foxx Exceptions #
+###################
-class CollectionConfigureError(ArangoError):
- """Failed to configure the collection properties."""
+class FoxxServiceListError(ArangoServerError):
+ """Failed to retrieve Foxx services."""
-class CollectionStatisticsError(ArangoError):
- """Failed to retrieve the collection statistics."""
+class FoxxServiceGetError(ArangoServerError):
+ """Failed to retrieve Foxx service metadata."""
-class CollectionRevisionError(ArangoError):
- """Failed to retrieve the collection revision."""
+class FoxxServiceCreateError(ArangoServerError):
+ """Failed to create Foxx service."""
-class CollectionChecksumError(ArangoError):
- """Failed to retrieve the collection checksum."""
+class FoxxServiceUpdateError(ArangoServerError):
+ """Failed to update Foxx service."""
-class CollectionCreateError(ArangoError):
- """Failed to create the collection."""
+class FoxxServiceReplaceError(ArangoServerError):
+ """Failed to replace Foxx service."""
-class CollectionDeleteError(ArangoError):
- """Failed to delete the collection"""
+class FoxxServiceDeleteError(ArangoServerError):
+ """Failed to delete Foxx services."""
-class CollectionRenameError(ArangoError):
- """Failed to rename the collection."""
+class FoxxConfigGetError(ArangoServerError):
+ """Failed to retrieve Foxx service configuration."""
-class CollectionTruncateError(ArangoError):
- """Failed to truncate the collection."""
+class FoxxConfigUpdateError(ArangoServerError):
+ """Failed to update Foxx service configuration."""
-class CollectionLoadError(ArangoError):
- """Failed to load the collection into memory."""
+class FoxxConfigReplaceError(ArangoServerError):
+ """Failed to replace Foxx service configuration."""
-class CollectionUnloadError(ArangoError):
- """Failed to unload the collection from memory."""
+class FoxxDependencyGetError(ArangoServerError):
+ """Failed to retrieve Foxx service dependencies."""
-class CollectionRotateJournalError(ArangoError):
- """Failed to rotate the journal of the collection."""
+class FoxxDependencyUpdateError(ArangoServerError):
+ """Failed to update Foxx service dependencies."""
-class CollectionBadStatusError(ArangoError):
- """Unknown status was returned from the collection."""
+class FoxxDependencyReplaceError(ArangoServerError):
+ """Failed to replace Foxx service dependencies."""
-#######################
-# Document Exceptions #
-#######################
+class FoxxScriptListError(ArangoServerError):
+ """Failed to retrieve Foxx service scripts."""
-class DocumentCountError(ArangoError):
- """Failed to retrieve the count of the documents in the collections."""
+class FoxxScriptRunError(ArangoServerError):
+ """Failed to run Foxx service script."""
-class DocumentInError(ArangoError):
- """Failed to check whether a collection contains a document."""
+class FoxxTestRunError(ArangoServerError):
+ """Failed to run Foxx service tests."""
-class DocumentGetError(ArangoError):
- """Failed to retrieve the document."""
+class FoxxDevModeEnableError(ArangoServerError):
+ """Failed to enable development mode for Foxx service."""
-class DocumentInsertError(ArangoError):
- """Failed to insert the document."""
+class FoxxDevModeDisableError(ArangoServerError):
+ """Failed to disable development mode for Foxx service."""
-class DocumentReplaceError(ArangoError):
- """Failed to replace the document."""
+class FoxxReadmeGetError(ArangoServerError):
+ """Failed to retrieve Foxx service readme."""
-class DocumentUpdateError(ArangoError):
- """Failed to update the document."""
+class FoxxSwaggerGetError(ArangoServerError):
+ """Failed to retrieve Foxx service swagger."""
-class DocumentDeleteError(ArangoError):
- """Failed to delete the document."""
+class FoxxDownloadError(ArangoServerError):
+ """Failed to download Foxx service bundle."""
-class DocumentRevisionError(ArangoError):
- """The expected and actual document revisions do not match."""
+class FoxxCommitError(ArangoServerError):
+ """Failed to commit local Foxx service state."""
####################
-# Index Exceptions #
+# Graph Exceptions #
####################
-class IndexListError(ArangoError):
- """Failed to retrieve the list of indexes in the collection."""
+class GraphListError(ArangoServerError):
+ """Failed to retrieve graphs."""
-class IndexCreateError(ArangoError):
- """Failed to create the index in the collection."""
+class GraphCreateError(ArangoServerError):
+ """Failed to create the graph."""
-class IndexDeleteError(ArangoError):
- """Failed to delete the index from the collection."""
+class GraphDeleteError(ArangoServerError):
+ """Failed to delete the graph."""
-##################
-# AQL Exceptions #
-##################
+class GraphPropertiesError(ArangoServerError):
+ """Failed to retrieve graph properties."""
+
+
+class GraphTraverseError(ArangoServerError):
+ """Failed to execute graph traversal."""
+
+
+class VertexCollectionListError(ArangoServerError):
+ """Failed to retrieve vertex collections."""
+
+
+class VertexCollectionCreateError(ArangoServerError):
+ """Failed to create vertex collection."""
-class AQLQueryExplainError(ArangoError):
- """Failed to explain the AQL query."""
+class VertexCollectionDeleteError(ArangoServerError):
+ """Failed to delete vertex collection."""
-class AQLQueryValidateError(ArangoError):
- """Failed to validate the AQL query."""
+class EdgeDefinitionListError(ArangoServerError):
+ """Failed to retrieve edge definitions."""
-class AQLQueryExecuteError(ArangoError):
- """Failed to execute the AQL query."""
+class EdgeDefinitionCreateError(ArangoServerError):
+ """Failed to create edge definition."""
-class AQLCacheClearError(ArangoError):
- """Failed to clear the AQL query cache."""
+class EdgeDefinitionReplaceError(ArangoServerError):
+ """Failed to replace edge definition."""
-class AQLCachePropertiesError(ArangoError):
- """Failed to retrieve the AQL query cache properties."""
+class EdgeDefinitionDeleteError(ArangoServerError):
+ """Failed to delete edge definition."""
-class AQLCacheConfigureError(ArangoError):
- """Failed to configure the AQL query cache properties."""
+class EdgeListError(ArangoServerError):
+ """Failed to retrieve edges coming in and out of a vertex."""
-class AQLFunctionListError(ArangoError):
- """Failed to retrieve the list of AQL user functions."""
+####################
+# Index Exceptions #
+####################
+
+
+class IndexListError(ArangoServerError):
+ """Failed to retrieve collection indexes."""
-class AQLFunctionCreateError(ArangoError):
- """Failed to create the AQL user function."""
+class IndexCreateError(ArangoServerError):
+ """Failed to create collection index."""
-class AQLFunctionDeleteError(ArangoError):
- """Failed to delete the AQL user function."""
+class IndexDeleteError(ArangoServerError):
+ """Failed to delete collection index."""
+
+
+class IndexLoadError(ArangoServerError):
+ """Failed to load indexes into memory."""
#####################
-# Cursor Exceptions #
+# Pregel Exceptions #
#####################
-class CursorNextError(ArangoError):
- """Failed to retrieve the next cursor result."""
+class PregelJobCreateError(ArangoServerError):
+ """Failed to create Pregel job."""
-class CursorCloseError(ArangoError):
- """Failed to delete the cursor from the server."""
+class PregelJobGetError(ArangoServerError):
+ """Failed to retrieve Pregel job details."""
-##########################
-# Transaction Exceptions #
-##########################
+class PregelJobDeleteError(ArangoServerError):
+ """Failed to delete Pregel job."""
-class TransactionError(ArangoError):
- """Failed to execute a transaction."""
+#####################
+# Server Exceptions #
+#####################
-####################
-# Batch Exceptions #
-####################
+class ServerConnectionError(ArangoClientError):
+ """Failed to connect to ArangoDB server."""
-class BatchExecuteError(ArangoError):
- """Failed to execute the batch request."""
+class ServerEngineError(ArangoServerError):
+ """Failed to retrieve database engine."""
-####################
-# Async Exceptions #
-####################
+class ServerEndpointsError(ArangoServerError):
+ """Failed to retrieve server endpoints."""
+
+
+class ServerVersionError(ArangoServerError):
+ """Failed to retrieve server version."""
+
+
+class ServerDetailsError(ArangoServerError):
+ """Failed to retrieve server details."""
+
+
+class ServerTimeError(ArangoServerError):
+ """Failed to retrieve server system time."""
+
+
+class ServerEchoError(ArangoServerError):
+ """Failed to retrieve details on last request."""
-class AsyncExecuteError(ArangoError):
- """Failed to execute the asynchronous request."""
+class ServerShutdownError(ArangoServerError):
+ """Failed to initiate shutdown sequence."""
-class AsyncJobListError(ArangoError):
- """Failed to list the IDs of the asynchronous jobs."""
+class ServerRunTestsError(ArangoServerError):
+ """Failed to execute server tests."""
-class AsyncJobCancelError(ArangoError):
- """Failed to cancel the asynchronous job."""
+class ServerRequiredDBVersionError(ArangoServerError):
+ """Failed to retrieve server target version."""
-class AsyncJobStatusError(ArangoError):
- """Failed to retrieve the asynchronous job result from the server."""
+class ServerReadLogError(ArangoServerError):
+ """Failed to retrieve global log."""
-class AsyncJobResultError(ArangoError):
- """Failed to pop the asynchronous job result from the server."""
+class ServerLogLevelError(ArangoServerError):
+ """Failed to retrieve server log levels."""
+
+
+class ServerLogLevelSetError(ArangoServerError):
+ """Failed to set server log levels."""
+
+
+class ServerReloadRoutingError(ArangoServerError):
+ """Failed to reload routing details."""
+
+
+class ServerStatisticsError(ArangoServerError):
+ """Failed to retrieve server statistics."""
+
+
+class ServerRoleError(ArangoServerError):
+ """Failed to retrieve server role in a cluster."""
-class AsyncJobClearError(ArangoError):
- """Failed to delete the asynchronous job result from the server."""
#####################
-# Pregel Exceptions #
+#  Task Exceptions  #
#####################
-class PregelJobCreateError(ArangoError):
- """Failed to start/create a Pregel job."""
+class TaskListError(ArangoServerError):
+ """Failed to retrieve server tasks."""
-class PregelJobGetError(ArangoError):
- """Failed to retrieve a Pregel job."""
+class TaskGetError(ArangoServerError):
+ """Failed to retrieve server task details."""
-class PregelJobDeleteError(ArangoError):
- """Failed to cancel/delete a Pregel job."""
+class TaskCreateError(ArangoServerError):
+ """Failed to create server task."""
-###########################
-# Cluster Test Exceptions #
-###########################
+class TaskDeleteError(ArangoServerError):
+ """Failed to delete server task."""
-class ClusterTestError(ArangoError):
- """Failed to execute the cluster round-trip for sharding."""
+##########################
+# Transaction Exceptions #
+##########################
-####################
-# Graph Exceptions #
-####################
+class TransactionStateError(ArangoClientError):
+ """The transaction object was in bad state."""
-class GraphListError(ArangoError):
- """Failed to retrieve the list of graphs."""
+class TransactionJobResultError(ArangoClientError):
+ """Failed to retrieve transaction job result."""
-class GraphGetError(ArangoError):
- """Failed to retrieve the graph."""
+class TransactionExecuteError(ArangoServerError):
+ """Failed to execute transaction API request"""
-class GraphCreateError(ArangoError):
- """Failed to create the graph."""
+###################
+# User Exceptions #
+###################
-class GraphDeleteError(ArangoError):
- """Failed to delete the graph."""
+
+class UserListError(ArangoServerError):
+ """Failed to retrieve users."""
+
+
+class UserGetError(ArangoServerError):
+ """Failed to retrieve user details."""
-class GraphPropertiesError(ArangoError):
- """Failed to retrieve the graph properties."""
+class UserCreateError(ArangoServerError):
+ """Failed to create user."""
-class GraphTraverseError(ArangoError):
- """Failed to execute the graph traversal."""
+class UserUpdateError(ArangoServerError):
+ """Failed to update user."""
-class OrphanCollectionListError(ArangoError):
- """Failed to retrieve the list of orphaned vertex collections."""
+class UserReplaceError(ArangoServerError):
+ """Failed to replace user."""
-class VertexCollectionListError(ArangoError):
- """Failed to retrieve the list of vertex collections."""
+class UserDeleteError(ArangoServerError):
+ """Failed to delete user."""
+
+
+#########################
+# Permission Exceptions #
+#########################
-class VertexCollectionCreateError(ArangoError):
- """Failed to create the vertex collection."""
+class PermissionListError(ArangoServerError):
+ """Failed to list user permissions."""
-class VertexCollectionDeleteError(ArangoError):
- """Failed to delete the vertex collection."""
+class PermissionGetError(ArangoServerError):
+ """Failed to retrieve user permission."""
+
+
+class PermissionUpdateError(ArangoServerError):
+ """Failed to update user permission."""
+
+
+class PermissionResetError(ArangoServerError):
+ """Failed to reset user permission."""
+
+
+##################
+# WAL Exceptions #
+##################
-class EdgeDefinitionListError(ArangoError):
- """Failed to retrieve the list of edge definitions."""
+class WALPropertiesError(ArangoServerError):
+ """Failed to retrieve WAL properties."""
-class EdgeDefinitionCreateError(ArangoError):
- """Failed to create the edge definition."""
+class WALConfigureError(ArangoServerError):
+ """Failed to configure WAL properties."""
-class EdgeDefinitionReplaceError(ArangoError):
- """Failed to replace the edge definition."""
+class WALTransactionListError(ArangoServerError):
+ """Failed to retrieve running WAL transactions."""
-class EdgeDefinitionDeleteError(ArangoError):
- """Failed to delete the edge definition."""
+class WALFlushError(ArangoServerError):
+ """Failed to flush WAL."""
diff --git a/arango/executor.py b/arango/executor.py
new file mode 100644
index 00000000..6dffd05d
--- /dev/null
+++ b/arango/executor.py
@@ -0,0 +1,410 @@
+from __future__ import absolute_import, unicode_literals
+
+from six import string_types
+
+__all__ = [
+ 'DefaultExecutor',
+ 'AsyncExecutor',
+ 'BatchExecutor',
+ 'TransactionExecutor'
+]
+
+from collections import OrderedDict
+from uuid import uuid4
+
+from arango.exceptions import (
+ AsyncExecuteError,
+ BatchStateError,
+ BatchExecuteError,
+ TransactionStateError,
+ TransactionExecuteError,
+)
+from arango.job import (
+ AsyncJob,
+ BatchJob,
+ TransactionJob
+)
+from arango.request import Request
+from arango.response import Response
+from arango.utils import suppress_warning
+
+
+class Executor(object): # pragma: no cover
+ """Base class for API executors.
+
+ API executors dictate how API requests are executed depending on the
+ execution context (i.e. "default", "async", "batch", "transaction").
+
+ :param connection: HTTP connection.
+ :type connection: arango.connection.Connection
+ """
+ context = None
+
+ def __init__(self, connection):
+ self._conn = connection
+
+ def execute(self, request, response_handler):
+ """Execute an API request.
+
+ :param request: HTTP request.
+ :type request: arango.request.Request
+ :param response_handler: HTTP response handler.
+ :type response_handler: callable
+ :return: API execution result or job.
+ :rtype: str | unicode | bool | int | list | dict | arango.job.Job
+ """
+ raise NotImplementedError
+
+
+class DefaultExecutor(Executor):
+ """Default API executor.
+
+ :param connection: HTTP connection.
+ :type connection: arango.connection.Connection
+ """
+ context = 'default'
+
+ def __init__(self, connection):
+ super(DefaultExecutor, self).__init__(connection)
+
+ def execute(self, request, response_handler):
+ """Execute an API request and return the result.
+
+ :param request: HTTP request.
+ :type request: arango.request.Request
+ :param response_handler: HTTP response handler.
+ :type response_handler: callable
+ :return: API execution result.
+ :rtype: str | unicode | bool | int | list | dict
+ """
+ response = self._conn.send_request(request)
+ return response_handler(response)
+
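Every wrapper method in database.py follows the same contract: build a Request, define a response_handler, and hand both to the active executor. A hypothetical ``ping()`` method (not part of this change set) makes the shape explicit:

.. code-block:: python

    from arango.exceptions import ServerVersionError
    from arango.request import Request


    class MyAPIWrapper(object):
        """Hypothetical wrapper; mirrors the pattern used in database.py."""

        def __init__(self, connection, executor):
            self._conn = connection
            self._executor = executor

        def _execute(self, request, response_handler):
            return self._executor.execute(request, response_handler)

        def ping(self):
            request = Request(method='get', endpoint='/_api/version')

            def response_handler(resp):
                if not resp.is_success:
                    raise ServerVersionError(resp, request)
                return resp.body['version']

            # Default context: runs immediately. Async, batch and
            # transaction contexts: queued and resolved later via a job.
            return self._execute(request, response_handler)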
+
+class AsyncExecutor(Executor):
+ """Async API Executor.
+
+ :param connection: HTTP connection.
+ :type connection: arango.connection.Connection
+ :param return_result: If set to True, API executions return instances of
+ :class:`arango.job.AsyncJob` and results can be retrieved from server
+ once available. If set to False, API executions return None and no
+ results are stored on server.
+ :type return_result: bool
+ """
+ context = 'async'
+
+ def __init__(self, connection, return_result):
+ super(AsyncExecutor, self).__init__(connection)
+ self._return_result = return_result
+
+ def execute(self, request, response_handler):
+ """Execute an API request asynchronously.
+
+ :param request: HTTP request.
+ :type request: arango.request.Request
+ :param response_handler: HTTP response handler.
+ :type response_handler: callable
+ :return: Async job or None if **return_result** parameter was set to
+ False during initialization.
+ :rtype: arango.job.AsyncJob | None
+ """
+ if self._return_result:
+ request.headers['x-arango-async'] = 'store'
+ else:
+ request.headers['x-arango-async'] = 'true'
+
+ resp = self._conn.send_request(request)
+ if not resp.is_success:
+ raise AsyncExecuteError(resp, request)
+ if not self._return_result:
+ return None
+
+ job_id = resp.headers['x-arango-async-id']
+ return AsyncJob(self._conn, job_id, response_handler)
+
+
+class BatchExecutor(Executor):
+ """Batch API executor.
+
+ :param connection: HTTP connection.
+ :type connection: arango.connection.Connection
+ :param return_result: If set to True, API executions return instances of
+ :class:`arango.job.BatchJob` that are populated with results on commit.
+ If set to False, API executions return None and no results are tracked
+ client-side.
+ :type return_result: bool
+ """
+ context = 'batch'
+
+ def __init__(self, connection, return_result):
+ super(BatchExecutor, self).__init__(connection)
+ self._return_result = return_result
+ self._queue = OrderedDict()
+ self._committed = False
+
+ @property
+ def jobs(self):
+ """Return the queued batch jobs.
+
+ :return: Batch jobs or None if **return_result** parameter was set to
+ False during initialization.
+ :rtype: [arango.job.BatchJob] | None
+ """
+ if not self._return_result:
+ return None
+ return [job for _, job in self._queue.values()]
+
+ def execute(self, request, response_handler):
+ """Place the request in the batch queue.
+
+ :param request: HTTP request.
+ :type request: arango.request.Request
+ :param response_handler: HTTP response handler.
+ :type response_handler: callable
+ :return: Batch job or None if **return_result** parameter was set to
+ False during initialization.
+ :rtype: arango.job.BatchJob | None
+ :raise arango.exceptions.BatchStateError: If batch was already
+ committed.
+ """
+ if self._committed:
+ raise BatchStateError('batch already committed')
+
+ job = BatchJob(response_handler)
+ self._queue[job.id] = (request, job)
+ return job if self._return_result else None
+
+ def commit(self):
+ """Execute the queued requests in a single batch API request.
+
+ If **return_result** parameter was set to True during initialization,
+ :class:`arango.job.BatchJob` instances are populated with results.
+
+ :return: Batch jobs or None if **return_result** parameter was set to
+ False during initialization.
+ :rtype: [arango.job.BatchJob] | None
+ :raise arango.exceptions.BatchStateError: If batch state is invalid
+ (e.g. batch was already committed or size of response from server
+ did not match the expected).
+ :raise arango.exceptions.BatchExecuteError: If commit fails.
+ """
+ if self._committed:
+ raise BatchStateError('batch already committed')
+
+ self._committed = True
+
+ if len(self._queue) == 0:
+ return self.jobs
+
+ # Boundary used for multipart request
+ boundary = uuid4().hex
+
+ # Buffer for building the batch request payload
+ buffer = []
+ for req, job in self._queue.values():
+ buffer.append('--{}'.format(boundary))
+ buffer.append('Content-Type: application/x-arango-batchpart')
+ buffer.append('Content-Id: {}'.format(job.id))
+ buffer.append('\r\n{}'.format(req))
+ buffer.append('--{}--'.format(boundary))
+
+ request = Request(
+ method='post',
+ endpoint='/_api/batch',
+ headers={
+ 'Content-Type':
+ 'multipart/form-data; boundary={}'.format(boundary)
+ },
+ data='\r\n'.join(buffer)
+ )
+ with suppress_warning('requests.packages.urllib3.connectionpool'):
+ resp = self._conn.send_request(request)
+
+ if not resp.is_success:
+ raise BatchExecuteError(resp, request)
+
+ if not self._return_result:
+ return None
+
+ raw_resps = resp.raw_body.split('--{}'.format(boundary))[1:-1]
+ if len(self._queue) != len(raw_resps):
+ raise BatchStateError(
+ 'expecting {} parts in batch response but got {}'
+ .format(len(self._queue), len(raw_resps))
+ )
+ for raw_resp in raw_resps:
+ # Parse and break down the batch response body
+ resp_parts = raw_resp.strip().split('\r\n')
+ raw_content_id = resp_parts[1]
+ raw_body = resp_parts[-1]
+ raw_status = resp_parts[3]
+ job_id = raw_content_id.split(' ')[1]
+ _, status_code, status_text = raw_status.split(' ', 2)
+
+ # Update the corresponding batch job
+ queued_req, queued_job = self._queue[job_id]
+ queued_job._response = Response(
+ method=queued_req.method,
+ url=self._conn.url_prefix + queued_req.endpoint,
+ headers={},
+ status_code=int(status_code),
+ status_text=status_text,
+ raw_body=raw_body
+ )
+ queued_job._status = 'done'
+
+ return self.jobs
+
+
+class TransactionExecutor(Executor):
+ """Executes transaction API requests.
+
+ :param connection: HTTP connection.
+ :type connection: arango.connection.Connection
+ :param return_result: If set to True, API executions return instances of
+ :class:`arango.job.TransactionJob` that are populated with results on
+ commit. If set to False, API executions return None and no results are
+ tracked client-side.
+ :type return_result: bool
+ :param read: Names of collections read during transaction.
+ :type read: [str | unicode]
+ :param write: Names of collections written to during transaction.
+ :type write: [str | unicode]
+ :param timeout: Timeout for waiting on collection locks. If set to 0,
+ the ArangoDB server waits indefinitely. If not set, the system default
+ value is used.
+ :type timeout: int
+ :param sync: Block until operation is synchronized to disk.
+ :type sync: bool
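+
+ A minimal usage sketch (assuming a ``StandardDatabase`` handle ``db``
+ and a "students" collection; this executor is normally created via
+ ``db.begin_transaction()``):
+
+ .. code-block:: python
+
+ txn_db = db.begin_transaction(write=['students'])
+ job = txn_db.collection('students').insert({'_key': 'kate'})
+ txn_db.commit() # queued requests run in a single transaction
+ print(job.result())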
+ """
+ context = 'transaction'
+
+ def __init__(self, connection, return_result, read, write, timeout, sync):
+ super(TransactionExecutor, self).__init__(connection)
+ self._return_result = return_result
+ self._read = read
+ self._write = write
+ self._timeout = timeout
+ self._sync = sync
+ self._queue = OrderedDict()
+ self._committed = False
+
+ @property
+ def jobs(self):
+ """Return the queued transaction jobs.
+
+ :return: Transaction jobs or None if **return_result** parameter was
+ set to False during initialization.
+ :rtype: [arango.job.TransactionJob] | None
+ """
+ if not self._return_result:
+ return None
+ return [job for _, job in self._queue.values()]
+
+ def execute(self, request, response_handler):
+ """Place the request in the transaction queue.
+
+ :param request: HTTP request.
+ :type request: arango.request.Request
+ :param response_handler: HTTP response handler.
+ :type response_handler: callable
+ :return: Transaction job or None if **return_result** parameter was
+ set to False during initialization.
+ :rtype: arango.job.TransactionJob | None
+ :raise arango.exceptions.TransactionStateError: If the transaction was
+ already committed or if the action does not support transactions.
+ """
+ if self._committed:
+ raise TransactionStateError('transaction already committed')
+ if request.command is None:
+ raise TransactionStateError('action not allowed in transaction')
+
+ job = TransactionJob(response_handler)
+ self._queue[job.id] = (request, job)
+ return job if self._return_result else None
+
+ def commit(self):
+ """Execute the queued requests in a single transaction API request.
+
+ If **return_result** parameter was set to True during initialization,
+ :class:`arango.job.TransactionJob` instances are populated with
+ results.
+
+ :return: Transaction jobs or None if **return_result** parameter was
+ set to False during initialization.
+ :rtype: [arango.job.TransactionJob] | None
+ :raise arango.exceptions.TransactionStateError: If the transaction was
+ already committed.
+ :raise arango.exceptions.TransactionExecuteError: If commit fails.
+ """
+ if self._committed:
+ raise TransactionStateError('transaction already committed')
+
+ self._committed = True
+
+ if len(self._queue) == 0:
+ return self.jobs
+
+ write_collections = set()
+ read_collections = set()
+
+ # Buffer for building the transaction JavaScript command
+ cmd_buffer = [
+ 'var db = require("internal").db',
+ 'var gm = require("@arangodb/general-graph")',
+ 'var result = {}'
+ ]
+ for req, job in self._queue.values():
+ if isinstance(req.read, string_types):
+ read_collections.add(req.read)
+ elif req.read is not None:
+ read_collections |= set(req.read)
+
+ if isinstance(req.write, string_types):
+ write_collections.add(req.write)
+ elif req.write is not None:
+ write_collections |= set(req.write)
+
+ cmd_buffer.append('result["{}"] = {}'.format(job.id, req.command))
+
+ cmd_buffer.append('return result;')
+
+ data = {
+ 'action': 'function () {{ {} }}'.format(';'.join(cmd_buffer)),
+ 'collections': {
+ 'read': list(read_collections),
+ 'write': list(write_collections),
+ 'allowImplicit': True
+ }
+ }
+ if self._timeout is not None:
+ data['lockTimeout'] = self._timeout
+ if self._sync is not None:
+ data['waitForSync'] = self._sync
+
+ request = Request(
+ method='post',
+ endpoint='/_api/transaction',
+ data=data,
+ )
+ resp = self._conn.send_request(request)
+
+ if not resp.is_success:
+ raise TransactionExecuteError(resp, request)
+
+ if not self._return_result:
+ return None
+
+ result = resp.body['result']
+ for req, job in self._queue.values():
+ job._response = Response(
+ method=req.method,
+ url=self._conn.url_prefix + req.endpoint,
+ headers={},
+ status_code=200,
+ status_text='OK',
+ raw_body=result.get(job.id)
+ )
+ job._status = 'done'
+ return self.jobs
diff --git a/arango/foxx.py b/arango/foxx.py
new file mode 100644
index 00000000..2393387e
--- /dev/null
+++ b/arango/foxx.py
@@ -0,0 +1,705 @@
+from __future__ import absolute_import, unicode_literals
+
+__all__ = ['Foxx']
+
+from arango.api import APIWrapper
+from arango.exceptions import (
+ FoxxServiceCreateError,
+ FoxxServiceDeleteError,
+ FoxxServiceGetError,
+ FoxxServiceListError,
+ FoxxServiceReplaceError,
+ FoxxServiceUpdateError,
+ FoxxConfigGetError,
+ FoxxConfigReplaceError,
+ FoxxConfigUpdateError,
+ FoxxDependencyGetError,
+ FoxxDependencyReplaceError,
+ FoxxDependencyUpdateError,
+ FoxxScriptListError,
+ FoxxScriptRunError,
+ FoxxTestRunError,
+ FoxxDevModeEnableError,
+ FoxxDevModeDisableError,
+ FoxxReadmeGetError,
+ FoxxSwaggerGetError,
+ FoxxCommitError,
+ FoxxDownloadError,
+)
+from arango.request import Request
+
+
+class Foxx(APIWrapper):
+ """Foxx API wrapper.
+
+ :param connection: HTTP connection.
+ :type connection: arango.connection.Connection
+ :param executor: API executor.
+ :type executor: arango.executor.Executor
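+
+ Normally reached through the database API. A minimal sketch (assuming
+ a ``StandardDatabase`` handle ``db``):
+
+ .. code-block:: python
+
+ foxx = db.foxx
+ for service in foxx.services():
+ print(service['mount'])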
+ """
+
+ def __init__(self, connection, executor):
+ super(Foxx, self).__init__(connection, executor)
+
+ def __repr__(self):
+ return '<Foxx in {}>'.format(self._conn.db_name)
+
+ def services(self, exclude_system=False):
+ """List installed services.
+
+ :param exclude_system: If set to True, system services are excluded.
+ :type exclude_system: bool
+ :return: List of installed services.
+ :rtype: [dict]
+ :raise arango.exceptions.FoxxServiceListError: If retrieval fails.
+ """
+ request = Request(
+ method='get',
+ endpoint='/_api/foxx',
+ params={'excludeSystem': exclude_system}
+ )
+
+ def response_handler(resp):
+ if not resp.is_success:
+ raise FoxxServiceListError(resp, request)
+ return resp.body
+
+ return self._execute(request, response_handler)
+
+ def service(self, mount):
+ """Return service metadata.
+
+ :param mount: Service mount path (e.g. "/_admin/aardvark").
+ :type mount: str | unicode
+ :return: Service metadata.
+ :rtype: dict
+ :raise arango.exceptions.FoxxServiceGetError: If retrieval fails.
+ """
+ request = Request(
+ method='get',
+ endpoint='/_api/foxx/service',
+ params={'mount': mount}
+ )
+
+ def response_handler(resp):
+ if not resp.is_success:
+ raise FoxxServiceGetError(resp, request)
+
+ if 'manifest' in resp.body:
+ mf = resp.body['manifest']
+ if 'defaultDocument' in mf:
+ mf['default_document'] = mf.pop('defaultDocument')
+
+ return resp.body
+
+ return self._execute(request, response_handler)
+
+ def create_service(self,
+ mount,
+ source,
+ config=None,
+ dependencies=None,
+ development=None,
+ setup=None,
+ legacy=None):
+ """Install a new service.
+
+ :param mount: Service mount path (e.g. "/_admin/aardvark").
+ :type mount: str | unicode
+ :param source: Fully qualified URL or absolute path on the server file
+ system. Must be accessible by the server, or by all servers if in
+ a cluster.
+ :type source: str | unicode
+ :param config: Configuration values.
+ :type config: dict
+ :param dependencies: Dependency settings.
+ :type dependencies: dict
+ :param development: Enable development mode.
+ :type development: bool
+ :param setup: Run service setup script.
+ :type setup: bool
+ :param legacy: Install the service in 2.8 legacy compatibility mode.
+ :type legacy: bool
+ :return: Service metadata.
+ :rtype: dict
+ :raise arango.exceptions.FoxxServiceCreateError: If install fails.
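+
+ Example (a sketch; the mount path is illustrative and the bundle at
+ "/tmp/service.zip" must already be readable by the server):
+
+ .. code-block:: python
+
+ service = db.foxx.create_service(
+ mount='/my_service',
+ source='/tmp/service.zip'
+ )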
+ """
+ params = {'mount': mount}
+ if development is not None:
+ params['development'] = development
+ if setup is not None:
+ params['setup'] = setup
+ if legacy is not None:
+ params['legacy'] = legacy
+
+ data = {'source': source}
+ if config is not None:
+ data['configuration'] = config
+ if dependencies is not None:
+ data['dependencies'] = dependencies
+
+ request = Request(
+ method='post',
+ endpoint='/_api/foxx',
+ params=params,
+ data=data,
+ )
+
+ def response_handler(resp):
+ if not resp.is_success:
+ raise FoxxServiceCreateError(resp, request)
+ return resp.body
+
+ return self._execute(request, response_handler)
+
+ def update_service(self,
+ mount,
+ source=None,
+ config=None,
+ dependencies=None,
+ teardown=None,
+ setup=None,
+ legacy=None):
+ """Update (upgrade) a service.
+
+ :param mount: Service mount path (e.g. "/_admin/aardvark").
+ :type mount: str | unicode
+ :param source: Fully qualified URL or absolute path on the server file
+ system. Must be accessible by the server, or by all servers if in
+ a cluster.
+ :type source: str | unicode
+ :param config: Configuration values.
+ :type config: dict
+ :param dependencies: Dependency settings.
+ :type dependencies: dict
+ :param teardown: Run service teardown script.
+ :type teardown: bool
+ :param setup: Run service setup script.
+ :type setup: bool
+ :param legacy: Install the service in 2.8 legacy compatibility mode.
+ :type legacy: bool
+ :return: Updated service metadata.
+ :rtype: dict
+ :raise arango.exceptions.FoxxServiceUpdateError: If update fails.
+ """
+ params = {'mount': mount}
+ if teardown is not None:
+ params['teardown'] = teardown
+ if setup is not None:
+ params['setup'] = setup
+ if legacy is not None:
+ params['legacy'] = legacy
+
+ data = {'source': source}
+ if config is not None:
+ data['configuration'] = config
+ if dependencies is not None:
+ data['dependencies'] = dependencies
+
+ request = Request(
+ method='patch',
+ endpoint='/_api/foxx/service',
+ params=params,
+ data=data,
+ )
+
+ def response_handler(resp):
+ if not resp.is_success:
+ raise FoxxServiceUpdateError(resp, request)
+ return resp.body
+
+ return self._execute(request, response_handler)
+
+ def replace_service(self,
+ mount,
+ source,
+ config=None,
+ dependencies=None,
+ teardown=None,
+ setup=None,
+ legacy=None,
+ force=None):
+ """Replace a service by removing the old one and installing a new one.
+
+ :param mount: Service mount path (e.g. "/_admin/aardvark").
+ :type mount: str | unicode
+ :param source: Fully qualified URL or absolute path on the server file
+ system. Must be accessible by the server, or by all servers if in
+ a cluster.
+ :type source: str | unicode
+ :param config: Configuration values.
+ :type config: dict
+ :param dependencies: Dependency settings.
+ :type dependencies: dict
+ :param teardown: Run service teardown script.
+ :type teardown: bool
+ :param setup: Run service setup script.
+ :type setup: bool
+ :param legacy: Install the service in 2.8 legacy compatibility mode.
+ :type legacy: bool
+ :param force: Force install if no service is found.
+ :type force: bool
+ :return: Replaced service metadata.
+ :rtype: dict
+ :raise arango.exceptions.FoxxServiceReplaceError: If replace fails.
+ """
+ params = {'mount': mount}
+ if teardown is not None:
+ params['teardown'] = teardown
+ if setup is not None:
+ params['setup'] = setup
+ if legacy is not None:
+ params['legacy'] = legacy
+ if force is not None:
+ params['force'] = force
+
+ data = {'source': source}
+ if config is not None:
+ data['configuration'] = config
+ if dependencies is not None:
+ data['dependencies'] = dependencies
+
+ request = Request(
+ method='put',
+ endpoint='/_api/foxx/service',
+ params=params,
+ data=data,
+ )
+
+ def response_handler(resp):
+ if not resp.is_success:
+ raise FoxxServiceReplaceError(resp, request)
+ return resp.body
+
+ return self._execute(request, response_handler)
+
+ def delete_service(self, mount, teardown=None):
+ """Uninstall a service.
+
+ :param mount: Service mount path (e.g. "/_admin/aardvark").
+ :type mount: str | unicode
+ :param teardown: Run service teardown script.
+ :type teardown: bool
+ :return: True if service was deleted successfully.
+ :rtype: bool
+ :raise arango.exceptions.FoxxServiceDeleteError: If delete fails.
+ """
+ params = {'mount': mount}
+ if teardown is not None:
+ params['teardown'] = teardown
+
+ request = Request(
+ method='delete',
+ endpoint='/_api/foxx/service',
+ params=params
+ )
+
+ def response_handler(resp):
+ if not resp.is_success:
+ raise FoxxServiceDeleteError(resp, request)
+ return True
+
+ return self._execute(request, response_handler)
+
+ def config(self, mount):
+ """Return service configuration.
+
+ :param mount: Service mount path (e.g. "/_admin/aardvark").
+ :type mount: str | unicode
+ :return: Configuration values.
+ :rtype: dict
+ :raise arango.exceptions.FoxxConfigGetError: If retrieval fails.
+ """
+ request = Request(
+ method='get',
+ endpoint='/_api/foxx/configuration',
+ params={'mount': mount},
+ )
+
+ def response_handler(resp):
+ if not resp.is_success:
+ raise FoxxConfigGetError(resp, request)
+ return resp.body
+
+ return self._execute(request, response_handler)
+
+ def update_config(self, mount, config):
+ """Update service configuration.
+
+ :param mount: Service mount path (e.g. "/_admin/aardvark").
+ :type mount: str | unicode
+ :param config: Configuration values. Omitted options are ignored.
+ :type config: dict
+ :return: Updated configuration values.
+ :rtype: dict
+ :raise arango.exceptions.FoxxConfigUpdateError: If update fails.
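+
+ Example (a sketch; valid configuration keys depend on the service
+ manifest, so "currency" here is illustrative):
+
+ .. code-block:: python
+
+ db.foxx.update_config(
+ mount='/my_service',
+ config={'currency': 'USD'}
+ )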
+ """
+ request = Request(
+ method='patch',
+ endpoint='/_api/foxx/configuration',
+ params={'mount': mount},
+ data=config
+ )
+
+ def response_handler(resp):
+ if not resp.is_success:
+ raise FoxxConfigUpdateError(resp, request)
+ return resp.body
+
+ return self._execute(request, response_handler)
+
+ def replace_config(self, mount, config):
+ """Replace service configuration.
+
+ :param mount: Service mount path (e.g. "/_admin/aardvark").
+ :type mount: str | unicode
+ :param config: Configuration values. Omitted options are reset to their
+ default values or marked as unconfigured.
+ :type config: dict
+ :return: Replaced configuration values.
+ :rtype: dict
+ :raise arango.exceptions.FoxxConfigReplaceError: If replace fails.
+ """
+ request = Request(
+ method='put',
+ endpoint='/_api/foxx/configuration',
+ params={'mount': mount},
+ data=config
+ )
+
+ def response_handler(resp):
+ if not resp.is_success:
+ raise FoxxConfigReplaceError(resp, request)
+ return resp.body
+
+ return self._execute(request, response_handler)
+
+ def dependencies(self, mount):
+ """Return service dependencies.
+
+ :param mount: Service mount path (e.g. "/_admin/aardvark").
+ :type mount: str | unicode
+ :return: Dependency settings.
+ :rtype: dict
+ :raise arango.exceptions.FoxxDependencyGetError: If retrieval fails.
+ """
+ request = Request(
+ method='get',
+ endpoint='/_api/foxx/dependencies',
+ params={'mount': mount},
+ )
+
+ def response_handler(resp):
+ if not resp.is_success:
+ raise FoxxDependencyGetError(resp, request)
+ return resp.body
+
+ return self._execute(request, response_handler)
+
+ def update_dependencies(self, mount, dependencies):
+ """Update service dependencies.
+
+ :param mount: Service mount path (e.g. "/_admin/aardvark").
+ :type mount: str | unicode
+ :param dependencies: Dependency settings. Omitted ones are ignored.
+ :type dependencies: dict
+ :return: Updated dependency settings.
+ :rtype: dict
+ :raise arango.exceptions.FoxxDependencyUpdateError: If update fails.
+ """
+ request = Request(
+ method='patch',
+ endpoint='/_api/foxx/dependencies',
+ params={'mount': mount},
+ data=dependencies
+ )
+
+ def response_handler(resp):
+ if not resp.is_success:
+ raise FoxxDependencyUpdateError(resp, request)
+ return resp.body
+
+ return self._execute(request, response_handler)
+
+ def replace_dependencies(self, mount, dependencies):
+ """Replace service dependencies.
+
+ :param mount: Service mount path (e.g. "/_admin/aardvark").
+ :type mount: str | unicode
+ :param dependencies: Dependency settings. Omitted ones are disabled.
+ :type dependencies: dict
+ :return: Replaced dependency settings.
+ :rtype: dict
+ :raise arango.exceptions.FoxxDependencyReplaceError: If replace fails.
+ """
+ request = Request(
+ method='put',
+ endpoint='/_api/foxx/dependencies',
+ params={'mount': mount},
+ data=dependencies
+ )
+
+ def response_handler(resp):
+ if not resp.is_success:
+ raise FoxxDependencyReplaceError(resp, request)
+ return resp.body
+
+ return self._execute(request, response_handler)
+
+ def enable_development(self, mount):
+ """Put the service into development mode.
+
+ While the service is running in development mode, it is reloaded from
+ the file system, and its setup script (if any) is re-executed every
+ time the service handles a request.
+
+ In a cluster with multiple coordinators, changes to the filesystem on
+ one coordinator are not reflected across other coordinators.
+
+ :param mount: Service mount path (e.g. "/_admin/aardvark").
+ :type mount: str | unicode
+ :return: Service metadata.
+ :rtype: dict
+ :raise arango.exceptions.FoxxDevModeEnableError: If operation fails.
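+
+ Example (a sketch with an illustrative mount path):
+
+ .. code-block:: python
+
+ service = db.foxx.enable_development('/my_service')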
+ """
+ request = Request(
+ method='post',
+ endpoint='/_api/foxx/development',
+ params={'mount': mount},
+ )
+
+ def response_handler(resp):
+ if not resp.is_success:
+ raise FoxxDevModeEnableError(resp, request)
+ return resp.body
+
+ return self._execute(request, response_handler)
+
+ def disable_development(self, mount):
+ """Put the service into production mode.
+
+ In a cluster with multiple coordinators, the services on all other
+ coordinators are replaced with the version on the calling coordinator.
+
+ :param mount: Service mount path (e.g. "/_admin/aardvark").
+ :type mount: str | unicode
+ :return: Service metadata.
+ :rtype: dict
+ :raise arango.exceptions.FoxxDevModeDisableError: If operation fails.
+ """
+ request = Request(
+ method='delete',
+ endpoint='/_api/foxx/development',
+ params={'mount': mount},
+ )
+
+ def response_handler(resp):
+ if not resp.is_success:
+ raise FoxxDevModeDisableError(resp, request)
+ return resp.body
+
+ return self._execute(request, response_handler)
+
+ def readme(self, mount):
+ """Return the service readme.
+
+ :param mount: Service mount path (e.g. "/_admin/aardvark").
+ :type mount: str | unicode
+ :return: Service readme.
+ :rtype: str | unicode
+ :raise arango.exceptions.FoxxReadmeGetError: If retrieval fails.
+ """
+ request = Request(
+ method='get',
+ endpoint='/_api/foxx/readme',
+ params={'mount': mount},
+ )
+
+ def response_handler(resp):
+ if not resp.is_success:
+ raise FoxxReadmeGetError(resp, request)
+ return resp.body
+
+ return self._execute(request, response_handler)
+
+ def swagger(self, mount):
+ """Return the Swagger API description for the given service.
+
+ :param mount: Service mount path (e.g. "/_admin/aardvark").
+ :type mount: str | unicode
+ :return: Swagger API description.
+ :rtype: dict
+ :raise arango.exceptions.FoxxSwaggerGetError: If retrieval fails.
+ """
+ request = Request(
+ method='get',
+ endpoint='/_api/foxx/swagger',
+ params={'mount': mount}
+ )
+
+ def response_handler(resp):
+ if not resp.is_success:
+ raise FoxxSwaggerGetError(resp, request)
+ if 'basePath' in resp.body:
+ resp.body['base_path'] = resp.body.pop('basePath')
+ return resp.body
+
+ return self._execute(request, response_handler)
+
+ def download(self, mount):
+ """Download service bundle.
+
+ When development mode is enabled, a new bundle is created every time.
+ Otherwise, the bundle represents the version of the service installed
+ on the server.
+
+ :param mount: Service mount path (e.g. "/_admin/aardvark").
+ :type mount: str | unicode
+ :return: Service bundle in raw string form.
+ :rtype: str | unicode
+ :raise arango.exceptions.FoxxDownloadError: If download fails.
+ """
+ request = Request(
+ method='post',
+ endpoint='/_api/foxx/download',
+ params={'mount': mount}
+ )
+
+ def response_handler(resp):
+ if not resp.is_success:
+ raise FoxxDownloadError(resp, request)
+ return resp.body
+
+ return self._execute(request, response_handler)
+
+ def commit(self, replace=None):
+ """Commit local service state of the coordinator to the database.
+
+ This can be used to resolve service conflicts between coordinators
+ that cannot be fixed automatically due to missing data.
+
+ :param replace: Overwrite any existing service files in database.
+ :type replace: bool
+ :return: True if the state was committed successfully.
+ :rtype: bool
+ :raise arango.exceptions.FoxxCommitError: If commit fails.
+ """
+ params = {}
+ if replace is not None:
+ params['replace'] = replace
+
+ request = Request(
+ method='post',
+ endpoint='/_api/foxx/commit',
+ params=params
+ )
+
+ def response_handler(resp):
+ if not resp.is_success:
+ raise FoxxCommitError(resp, request)
+ return True
+
+ return self._execute(request, response_handler)
+
+ def scripts(self, mount):
+ """List service scripts.
+
+ :param mount: Service mount path (e.g. "/_admin/aardvark").
+ :type mount: str | unicode
+ :return: Service scripts.
+ :rtype: dict
+ :raise arango.exceptions.FoxxScriptListError: If retrieval fails.
+ """
+ request = Request(
+ method='get',
+ endpoint='/_api/foxx/scripts',
+ params={'mount': mount},
+ )
+
+ def response_handler(resp):
+ if not resp.is_success:
+ raise FoxxScriptListError(resp, request)
+ return resp.body
+
+ return self._execute(request, response_handler)
+
+ def run_script(self, mount, name, arg=None):
+ """Run a service script.
+
+ :param mount: Service mount path (e.g. "/_admin/aardvark").
+ :type mount: str | unicode
+ :param name: Script name.
+ :type name: str | unicode
+ :param arg: Arbitrary value passed into the script as first argument.
+ :type arg: str | unicode | bool | int | list | dict
+ :return: Result of the script, if any.
+ :rtype: dict
+ :raise arango.exceptions.FoxxScriptRunError: If script fails.
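+
+ Example (a sketch; available script names come from the service
+ manifest, so "setup" here is illustrative):
+
+ .. code-block:: python
+
+ db.foxx.run_script(
+ mount='/my_service',
+ name='setup',
+ arg={'force': True}
+ )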
+ """
+ request = Request(
+ method='post',
+ endpoint='/_api/foxx/scripts/{}'.format(name),
+ params={'mount': mount},
+ data={} if arg is None else arg
+ )
+
+ def response_handler(resp):
+ if not resp.is_success:
+ raise FoxxScriptRunError(resp, request)
+ return resp.body
+
+ return self._execute(request, response_handler)
+
+ def run_tests(self,
+ mount,
+ reporter='default',
+ idiomatic=None,
+ output_format=None):
+ """Run service tests.
+
+ :param mount: Service mount path (e.g. "/_admin/aardvark").
+ :type mount: str | unicode
+ :param reporter: Test reporter. Allowed values are "default" (simple
+ list of test cases), "suite" (object of test cases nested in
+ suites), "stream" (raw stream of test results), "xunit" (XUnit or
+ JUnit compatible structure), or "tap" (raw TAP compatible stream).
+ :type reporter: str | unicode
+ :param idiomatic: Use matching format for the reporter, regardless of
+ the value of parameter **output_format**.
+ :type idiomatic: bool
+ :param output_format: Used to further control format. Allowed values
+ are "x-ldjson", "xml" and "text". When using "stream" reporter,
+ setting this to "x-ldjson" returns newline-delimited JSON stream.
+ When using "tap" reporter, setting this to "text" returns plain
+ text TAP report. When using "xunit" reporter, setting this to
+ "xml" returns XML instead of JSONML.
+ :type output_format: str | unicode
+ :return: Reporter output (e.g. raw JSON string, XML, plain text).
+ :rtype: str | unicode
+ :raise arango.exceptions.FoxxTestRunError: If test fails.
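+
+ Example (a sketch that requests a plain-text TAP report for an
+ illustrative mount path):
+
+ .. code-block:: python
+
+ report = db.foxx.run_tests(
+ mount='/my_service',
+ reporter='tap',
+ output_format='text'
+ )
+ print(report)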
+ """
+ params = {'mount': mount, 'reporter': reporter}
+ if idiomatic is not None:
+ params['idiomatic'] = idiomatic
+
+ headers = {}
+ if output_format == 'x-ldjson':
+ headers['Accept'] = 'application/x-ldjson'
+ elif output_format == 'xml':
+ headers['Accept'] = 'application/xml'
+ elif output_format == 'text':
+ headers['Accept'] = 'text/plain'
+
+ request = Request(
+ method='post',
+ endpoint='/_api/foxx/tests',
+ params=params,
+ headers=headers
+ )
+
+ def response_handler(resp):
+ if not resp.is_success:
+ raise FoxxTestRunError(resp, request)
+ return resp.raw_body
+
+ return self._execute(request, response_handler)
diff --git a/arango/graph.py b/arango/graph.py
index 3845013c..20e57494 100644
--- a/arango/graph.py
+++ b/arango/graph.py
@@ -1,155 +1,163 @@
from __future__ import absolute_import, unicode_literals
-from arango.collections import (
- EdgeCollection,
- VertexCollection
+__all__ = ['Graph']
+
+from arango.api import APIWrapper
+from arango.collection import EdgeCollection
+from arango.collection import VertexCollection
+from arango.exceptions import (
+ EdgeDefinitionListError,
+ EdgeDefinitionCreateError,
+ EdgeDefinitionDeleteError,
+ EdgeDefinitionReplaceError,
+ GraphPropertiesError,
+ GraphTraverseError,
+ VertexCollectionListError,
+ VertexCollectionCreateError,
+ VertexCollectionDeleteError,
)
-from arango.utils import HTTP_OK
-from arango.exceptions import *
from arango.request import Request
-from arango.api import APIWrapper, api_method
+from arango.utils import get_col_name, get_id
class Graph(APIWrapper):
- """ArangoDB graph.
+ """Graph API wrapper.
- A graph can have vertex and edge collections.
-
- :param connection: ArangoDB connection object
- :type connection: arango.connection.Connection
- :param name: the name of the graph
+ :param connection: HTTP connection.
+ :type connection: arango.connection.Connection
+ :param executor: API executor.
+ :type executor: arango.executor.Executor
+ :param name: Graph name.
:type name: str | unicode
"""
- def __init__(self, connection, name):
- self._conn = connection
+ def __init__(self, connection, executor, name):
+ super(Graph, self).__init__(connection, executor)
self._name = name
def __repr__(self):
- return "".format(self._name)
+ return ''.format(self._name)
- @property
- def name(self):
- """Return the name of the graph.
+ def _get_col_by_vertex(self, vertex):
+ """Return the vertex collection for the given vertex document.
- :returns: the name of the graph
- :rtype: str
+ :param vertex: Vertex document ID or body with "_id" field.
+ :type vertex: str | unicode | dict
+ :return: Vertex collection API wrapper.
+ :rtype: arango.collection.VertexCollection
"""
- return self._name
+ return self.vertex_collection(get_col_name(vertex))
- def vertex_collection(self, name):
- """Return the vertex collection object.
+ def _get_col_by_edge(self, edge):
+ """Return the vertex collection for the given edge document.
- :param name: the name of the vertex collection
- :type name: str | unicode
- :returns: the vertex collection object
- :rtype: arango.collections.vertex.VertexCollection
+ :param edge: Edge document ID or body with "_id" field.
+ :type edge: str | unicode | dict
+ :return: Edge collection API wrapper.
+ :rtype: arango.collection.EdgeCollection
"""
- return VertexCollection(self._conn, self._name, name)
+ return self.edge_collection(get_col_name(edge))
- def edge_collection(self, name):
- """Return the edge collection object.
+ @property
+ def name(self):
+ """Return the graph name.
- :param name: the name of the edge collection
- :type name: str | unicode
- :returns: the edge collection object
- :rtype: arango.collections.edge.EdgeCollection
+ :return: Graph name.
+ :rtype: str | unicode
"""
- return EdgeCollection(self._conn, self._name, name)
+ return self._name
- @api_method
def properties(self):
- """Return the graph properties.
+ """Return graph properties.
- :returns: the graph properties
+ :return: Graph properties.
:rtype: dict
- :raises arango.exceptions.GraphPropertiesError: if the properties
- of the graph cannot be retrieved
+ :raise arango.exceptions.GraphPropertiesError: If retrieval fails.
"""
request = Request(
method='get',
endpoint='/_api/gharial/{}'.format(self._name)
)
- def handler(res):
- if res.status_code not in HTTP_OK:
- raise GraphPropertiesError(res)
- record = res.body['graph']
- return {
- 'id': record['_id'],
- 'name': record['name'],
- 'revision': record['_rev'],
- 'orphan_collections': record['orphanCollections'],
+ def response_handler(resp):
+ if not resp.is_success:
+ raise GraphPropertiesError(resp, request)
+ body = resp.body['graph']
+ properties = {
+ 'id': body['_id'],
+ 'name': body['name'],
+ 'revision': body['_rev'],
+ 'orphan_collections': body['orphanCollections'],
'edge_definitions': [
{
- 'name': edge_definition['collection'],
- 'to_collections': edge_definition['to'],
- 'from_collections': edge_definition['from']
+ 'edge_collection': edge_definition['collection'],
+ 'from_vertex_collections': edge_definition['from'],
+ 'to_vertex_collections': edge_definition['to'],
}
- for edge_definition in record['edgeDefinitions']
- ],
- 'smart': record.get('isSmart'),
- 'smart_field': record.get('smartGraphAttribute'),
- 'shard_count': record.get('numberOfShards')
+ for edge_definition in body['edgeDefinitions']
+ ]
}
- return request, handler
+ if 'isSmart' in body:
+ properties['smart'] = body['isSmart']
+ if 'smartGraphAttribute' in body:
+ properties['smart_field'] = body['smartGraphAttribute']
+ if 'numberOfShards' in body:
+ properties['shard_count'] = body['numberOfShards']
+ if 'replicationFactor' in body:
+ properties['replication_factor'] = body['replicationFactor']
+ return properties
+
+ return self._execute(request, response_handler)
################################
# Vertex Collection Management #
################################
- @api_method
- def orphan_collections(self):
- """Return the orphan vertex collections of the graph.
+ def has_vertex_collection(self, name):
+ """Check if the graph has the given vertex collection.
- :returns: the names of the orphan vertex collections
- :rtype: list
- :raises arango.exceptions.OrphanCollectionListError: if the list of
- orphan vertex collections cannot be retrieved
+ :param name: Vertex collection name.
+ :type name: str | unicode
+ :return: True if vertex collection exists, False otherwise.
+ :rtype: bool
"""
- request = Request(
- method='get',
- endpoint='/_api/gharial/{}'.format(self._name)
- )
+ return name in self.vertex_collections()
- def handler(res):
- if res.status_code not in HTTP_OK:
- raise OrphanCollectionListError(res)
- return res.body['graph']['orphanCollections']
-
- return request, handler
-
- @api_method
def vertex_collections(self):
- """Return the vertex collections of the graph.
+ """Return vertex collections in the graph that are not orphaned.
- :returns: the names of the vertex collections
- :rtype: list
- :raises arango.exceptions.VertexCollectionListError: if the list of
- vertex collections cannot be retrieved
+ :return: Names of vertex collections that are not orphaned.
+ :rtype: [str | unicode]
+ :raise arango.exceptions.VertexCollectionListError: If retrieval fails.
"""
request = Request(
method='get',
- endpoint='/_api/gharial/{}/vertex'.format(self._name)
+ endpoint='/_api/gharial/{}/vertex'.format(self._name),
)
- def handler(res):
- if res.status_code not in HTTP_OK:
- raise VertexCollectionListError(res)
- return res.body['collections']
+ def response_handler(resp):
+ if not resp.is_success:
+ raise VertexCollectionListError(resp, request)
+ return sorted(set(resp.body['collections']))
- return request, handler
+ return self._execute(request, response_handler)
+
+ def vertex_collection(self, name):
+ """Return the vertex collection API wrapper.
+
+ :param name: Vertex collection name.
+ :type name: str | unicode
+ :return: Vertex collection API wrapper.
+ :rtype: arango.collection.VertexCollection
+ """
+ return VertexCollection(self._conn, self._executor, self._name, name)
- @api_method
def create_vertex_collection(self, name):
- """Create a vertex collection for the graph.
+ """Create a vertex collection in the graph.
- :param name: the name of the new vertex collection to create
+ :param name: Vertex collection name.
:type name: str | unicode
- :returns: the vertex collection object
- :rtype: arango.collections.vertex.VertexCollection
- :raises arango.exceptions.VertexCollectionCreateError: if the vertex
- collection cannot be created
+ :return: Vertex collection API wrapper.
+ :rtype: arango.collection.VertexCollection
+ :raise arango.exceptions.VertexCollectionCreateError: If create fails.
"""
request = Request(
method='post',
@@ -157,25 +165,24 @@ def create_vertex_collection(self, name):
data={'collection': name}
)
- def handler(res):
- if res.status_code not in HTTP_OK:
- raise VertexCollectionCreateError(res)
- return VertexCollection(self._conn, self._name, name)
+ def response_handler(resp):
+ if not resp.is_success:
+ raise VertexCollectionCreateError(resp, request)
+ return self.vertex_collection(name)
- return request, handler
+ return self._execute(request, response_handler)
- @api_method
def delete_vertex_collection(self, name, purge=False):
- """Remove the vertex collection from the graph.
+ """Remove a vertex collection from the graph.
- :param name: the name of the vertex collection to remove
+ :param name: Vertex collection name.
:type name: str | unicode
- :param purge: delete the vertex collection completely
+ :param purge: If set to True, the vertex collection is not just deleted
+ from the graph but also from the database completely.
:type purge: bool
- :returns: whether the operation was successful
+ :return: True if vertex collection was deleted successfully.
:rtype: bool
- :raises arango.exceptions.VertexCollectionDeleteError: if the vertex
- collection cannot be removed from the graph
+ :raise arango.exceptions.VertexCollectionDeleteError: If delete fails.
"""
request = Request(
method='delete',
@@ -183,127 +190,153 @@ def delete_vertex_collection(self, name, purge=False):
params={'dropCollection': purge}
)
- def handler(res):
- if res.status_code not in HTTP_OK:
- raise VertexCollectionDeleteError(res)
- return not res.body['error']
+ def response_handler(resp):
+ if not resp.is_success:
+ raise VertexCollectionDeleteError(resp, request)
+ return True
- return request, handler
+ return self._execute(request, response_handler)
##############################
- # Edge Definition Management #
+ # Edge Collection Management #
##############################
- @api_method
- def edge_definitions(self):
- """Return the edge definitions of the graph.
+ def has_edge_definition(self, name):
+ """Check if the graph has the given edge definition.
- :returns: the edge definitions of the graph
- :rtype: list
- :raises arango.exceptions.EdgeDefinitionListError: if the list of
- edge definitions cannot be retrieved
+ :param name: Edge collection name.
+ :type name: str | unicode
+ :return: True if edge definition exists, False otherwise.
+ :rtype: bool
"""
- request = Request(
- method='get',
- endpoint='/_api/gharial/{}'.format(self._name)
+ return any(
+ definition['edge_collection'] == name
+ for definition in self.edge_definitions()
)
- def handler(res):
- if res.status_code not in HTTP_OK:
- raise EdgeDefinitionListError(res)
- return [
- {
- 'name': edge_definition['collection'],
- 'to_collections': edge_definition['to'],
- 'from_collections': edge_definition['from']
- }
- for edge_definition in
- res.body['graph']['edgeDefinitions']
- ]
-
- return request, handler
-
- @api_method
- def create_edge_definition(self, name, from_collections, to_collections):
- """Create a new edge definition for the graph.
-
- An edge definition consists of an edge collection, one or more "from"
- vertex collections, one or more "to" vertex collections.
-
- :param name: the name of the new edge collection
+ def has_edge_collection(self, name):
+ """Check if the graph has the given edge collection.
+
+ :param name: Edge collection name.
:type name: str | unicode
- :param from_collections: the name(s) of the "from" vertex collections
- :type from_collections: list
- :param to_collections: the names of the "to" vertex collections
- :type to_collections: list
- :returns: the edge collection object
- :rtype: arango.collections.edge.EdgeCollection
- :raises arango.exceptions.EdgeDefinitionCreateError: if the edge
- definition cannot be created
+ :return: True if edge collection exists, False otherwise.
+ :rtype: bool
+ """
+ return self.has_edge_definition(name)
+
+ def edge_collection(self, name):
+ """Return the edge collection API wrapper.
+
+ :param name: Edge collection name.
+ :type name: str | unicode
+ :return: Edge collection API wrapper.
+ :rtype: arango.collection.EdgeCollection
+ """
+ return EdgeCollection(self._conn, self._executor, self._name, name)
+
+ def edge_definitions(self):
+ """Return the edge definitions of the graph.
+
+ :return: Edge definitions of the graph.
+ :rtype: [dict]
+ :raise arango.exceptions.EdgeDefinitionListError: If retrieval fails.
+ """
+ try:
+ return self.properties()['edge_definitions']
+ except GraphPropertiesError as err:
+ raise EdgeDefinitionListError(err.response, err.request)
+
+ def create_edge_definition(self,
+ edge_collection,
+ from_vertex_collections,
+ to_vertex_collections):
+ """Create a new edge definition.
+
+ An edge definition consists of an edge collection, "from" vertex
+ collection(s) and "to" vertex collection(s). Here is an example entry:
+
+ .. code-block:: python
+
+ {
+ 'edge_collection': 'edge_collection_name',
+ 'from_vertex_collections': ['from_vertex_collection_name'],
+ 'to_vertex_collections': ['to_vertex_collection_name']
+ }
+
+ :param edge_collection: Edge collection name.
+ :type edge_collection: str | unicode
+ :param from_vertex_collections: Names of "from" vertex collections.
+ :type from_vertex_collections: [str | unicode]
+ :param to_vertex_collections: Names of "to" vertex collections.
+ :type to_vertex_collections: [str | unicode]
+ :return: Edge collection API wrapper.
+ :rtype: arango.collection.EdgeCollection
+ :raise arango.exceptions.EdgeDefinitionCreateError: If create fails.
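+
+ Example call (a sketch with illustrative collection names):
+
+ .. code-block:: python
+
+ teach = graph.create_edge_definition(
+ edge_collection='teach',
+ from_vertex_collections=['teachers'],
+ to_vertex_collections=['lectures']
+ )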
"""
request = Request(
method='post',
endpoint='/_api/gharial/{}/edge'.format(self._name),
data={
- 'collection': name,
- 'from': from_collections,
- 'to': to_collections
+ 'collection': edge_collection,
+ 'from': from_vertex_collections,
+ 'to': to_vertex_collections
}
)
- def handler(res):
- if res.status_code not in HTTP_OK:
- raise EdgeDefinitionCreateError(res)
- return EdgeCollection(self._conn, self._name, name)
-
- return request, handler
-
- @api_method
- def replace_edge_definition(self, name, from_collections, to_collections):
- """Replace an edge definition in the graph.
-
- :param name: the name of the edge definition to replace
- :type name: str | unicode
- :param from_collections: the names of the "from" vertex collections
- :type from_collections: list
- :param to_collections: the names of the "to" vertex collections
- :type to_collections: list
- :returns: whether the operation was successful
+ def response_handler(resp):
+ if not resp.is_success:
+ raise EdgeDefinitionCreateError(resp, request)
+ return self.edge_collection(edge_collection)
+
+ return self._execute(request, response_handler)
+
+ def replace_edge_definition(self,
+ edge_collection,
+ from_vertex_collections,
+ to_vertex_collections):
+ """Replace an edge definition.
+
+ :param edge_collection: Edge collection name.
+ :type edge_collection: str | unicode
+ :param from_vertex_collections: Names of "from" vertex collections.
+ :type from_vertex_collections: [str | unicode]
+ :param to_vertex_collections: Names of "to" vertex collections.
+ :type to_vertex_collections: [str | unicode]
+ :return: Edge collection API wrapper.
- :rtype: bool
+ :rtype: arango.collection.EdgeCollection
- :raises arango.exceptions.EdgeDefinitionReplaceError: if the edge
- definition cannot be replaced
+ :raise arango.exceptions.EdgeDefinitionReplaceError: If replace fails.
"""
request = Request(
method='put',
endpoint='/_api/gharial/{}/edge/{}'.format(
- self._name, name
+ self._name, edge_collection
),
data={
- 'collection': name,
- 'from': from_collections,
- 'to': to_collections
+ 'collection': edge_collection,
+ 'from': from_vertex_collections,
+ 'to': to_vertex_collections
}
)
- def handler(res):
- if res.status_code not in HTTP_OK:
- raise EdgeDefinitionReplaceError(res)
- return not res.body['error']
+ def response_handler(resp):
+ if not resp.is_success:
+ raise EdgeDefinitionReplaceError(resp, request)
+ return self.edge_collection(edge_collection)
- return request, handler
+ return self._execute(request, response_handler)
- @api_method
def delete_edge_definition(self, name, purge=False):
- """Remove an edge definition from the graph.
+ """Delete an edge definition from the graph.
- :param name: the name of the edge collection
+ :param name: Edge collection name.
:type name: str | unicode
- :param purge: delete the edge collection completely
+ :param purge: If set to True, the edge definition is not just removed
+ from the graph but the edge collection is also deleted completely
+ from the database.
:type purge: bool
- :returns: whether the operation was successful
+ :return: True if edge definition was deleted successfully.
:rtype: bool
- :raises arango.exceptions.EdgeDefinitionDeleteError: if the edge
- definition cannot be deleted
+ :raise arango.exceptions.EdgeDefinitionDeleteError: If delete fails.
"""
request = Request(
method='delete',
@@ -311,18 +344,17 @@ def delete_edge_definition(self, name, purge=False):
params={'dropCollection': purge}
)
- def handler(res):
- if res.status_code not in HTTP_OK:
- raise EdgeDefinitionDeleteError(res)
- return not res.body['error']
+ def response_handler(resp):
+ if not resp.is_success:
+ raise EdgeDefinitionDeleteError(resp, request)
+ return True
- return request, handler
+ return self._execute(request, response_handler)
- ####################
- # Graph Traversals #
- ####################
+ ###################
+ # Graph Functions #
+ ###################
- @api_method
def traverse(self,
start_vertex,
direction='outbound',
@@ -341,59 +373,62 @@ def traverse(self,
expander_func=None):
"""Traverse the graph and return the visited vertices and edges.
- :param start_vertex: the collection and the key of the start vertex in
- the format ``"collection/key"``
- :type start_vertex: str | unicode
- :param direction: ``"outbound"`` (default), ``"inbound"`` or ``"any"``
+ :param start_vertex: Start vertex document ID or body with "_id" field.
+ :type start_vertex: str | unicode | dict
+ :param direction: Traversal direction. Allowed values are "outbound"
+ (default), "inbound" and "any".
:type direction: str | unicode
- :param item_order: ``"forward"`` (default) or ``"backward"``
+ :param item_order: Item iteration order. Allowed values are "forward"
+ (default) and "backward".
:type item_order: str | unicode
- :param strategy: ``"dfs"`` or ``"bfs"``
+ :param strategy: Traversal strategy. Allowed values are "depthfirst"
+ and "breadthfirst".
:type strategy: str | unicode
- :param order: ``"preorder"``, ``"postorder"``, ``"preorder-expander"``
- or ``None`` (default)
+ :param order: Traversal order. Allowed values are "preorder",
+ "postorder", and "preorder-expander".
:type order: str | unicode
- :param vertex_uniqueness: ``"global"``, ``"path"`` or ``None``
+ :param vertex_uniqueness: Uniqueness for visited vertices. Allowed
+ values are "global", "path" or "none".
:type vertex_uniqueness: str | unicode
- :param edge_uniqueness: ``"global"``, ``"path"`` or ``None``
+ :param edge_uniqueness: Uniqueness for visited edges. Allowed values
+ are "global", "path" or "none".
:type edge_uniqueness: str | unicode
- :param min_depth: the minimum depth of the nodes to visit
+ :param min_depth: Minimum depth of the nodes to visit.
:type min_depth: int
- :param max_depth: the maximum depth of the nodes to visit
+ :param max_depth: Maximum depth of the nodes to visit.
:type max_depth: int
- :param max_iter: halt the graph traversal after a maximum number of
- iterations (e.g. to prevent endless loops in cyclic graphs)
+ :param max_iter: If set, halt the traversal after the given number of
+ iterations. This parameter can be used to prevent endless loops in
+ cyclic graphs.
:type max_iter: int
- :param init_func: init function in Javascript with signature
- ``(config, result) -> void``, which is used to initialize values
+ :param init_func: Initialization function in JavaScript with signature
+ ``(config, result) -> void``. This function is used to initialize
+ values in the result.
:type init_func: str | unicode
- :param sort_func: sort function in Javascript with signature
+ :param sort_func: Sorting function in JavaScript with signature
``(left, right) -> integer``, which returns ``-1`` if ``left <
- right``, ``+1`` if ``left > right``, and ``0`` if ``left == right``
+ right``, ``+1`` if ``left > right`` and ``0`` if ``left == right``.
:type sort_func: str | unicode
- :param filter_func: filter function in Javascript with signature
- ``(config, vertex, path) -> mixed``, where mixed can be one of four
- possible values: ``"exclude"`` (do not visit the vertex),
- ``"prune"`` (do not follow the edges of the vertex), ``""`` or
- ``undefined`` (visit the vertex and its edges), or an Array
- (any combinations of the ``"mixed"``, ``"prune"``, ``""`` or
- ``undefined``).
+ :param filter_func: Filter function in JavaScript with signature
+ ``(config, vertex, path) -> mixed``, where ``mixed`` can have one
+ of the following values (or an array with multiple): "exclude" (do
+ not visit the vertex), "prune" (do not follow the edges of the
+ vertex), or "undefined" (visit the vertex and follow its edges).
:type filter_func: str | unicode
- :param visitor_func: visitor function in Javascript with signature
- ``(config, result, vertex, path, connected) -> void``, where the
- return value is ignored, ``result`` is modified by reference, and
- ``connected`` is populated only when argument **order** is set to
- ``"preorder-expander"``
+ :param visitor_func: Visitor function in JavaScript with signature
+ ``(config, result, vertex, path, connected) -> void``. The return
+ value is ignored, ``result`` is modified by reference, and
+ ``connected`` is populated only when parameter **order** is set to
+ "preorder-expander".
:type visitor_func: str | unicode
- :param expander_func: expander function in Javascript with signature
- ``(config, vertex, path) -> mixed``, which must return an array of
- the connections for vertex where each connection is an object with
- attributes edge and vertex
+ :param expander_func: Expander function in JavaScript with signature
+ ``(config, vertex, path) -> mixed``. The function must return an
+ array of connections for ``vertex``. Each connection is an object
+ with attributes "edge" and "vertex".
:type expander_func: str | unicode
- :returns: the visited edges and vertices
+ :return: Visited edges and vertices.
:rtype: dict
- :raises arango.exceptions.GraphTraverseError: if the graph traversal
- cannot be executed
+ :raise arango.exceptions.GraphTraverseError: If traversal fails.
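+
+ Example (a sketch; "students/01" assumes a vertex collection named
+ "students" registered in the graph):
+
+ .. code-block:: python
+
+ result = graph.traverse(
+ start_vertex='students/01',
+ direction='outbound',
+ strategy='breadthfirst'
+ )
+ print(result['vertices'])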
"""
if strategy is not None:
if strategy.lower() == 'dfs':
@@ -408,7 +443,7 @@ def traverse(self,
uniqueness['edges'] = edge_uniqueness
data = {
- 'startVertex': start_vertex,
+ 'startVertex': get_id(start_vertex),
'graphName': self._name,
'direction': direction,
'strategy': strategy,
@@ -430,9 +465,386 @@ def traverse(self,
data={k: v for k, v in data.items() if v is not None}
)
- def handler(res):
- if res.status_code not in HTTP_OK:
- raise GraphTraverseError(res)
- return res.body['result']['visited']
+ def response_handler(resp):
+ if not resp.is_success:
+ raise GraphTraverseError(resp, request)
+ return resp.body['result']['visited']
+
+ return self._execute(request, response_handler)
+
+ #####################
+ # Vertex Management #
+ #####################
+
+ def has_vertex(self, vertex, rev=None, check_rev=True):
+ """Check if the given vertex document exists in the graph.
+
+ :param vertex: Vertex document ID or body with "_id" field.
+ :type vertex: str | unicode | dict
+ :param rev: Expected document revision. Overrides the value of "_rev"
+ field in **vertex** if present.
+ :type rev: str | unicode
+ :param check_rev: If set to True, revision of **vertex** (if given) is
+ compared against the revision of target vertex document.
+ :type check_rev: bool
+ :return: True if vertex document exists, False otherwise.
+ :rtype: bool
+ :raise arango.exceptions.DocumentInError: If check fails.
+ :raise arango.exceptions.DocumentRevisionError: If revisions mismatch.
+ """
+ return self._get_col_by_vertex(vertex).has(vertex, rev, check_rev)
+
+ def vertex(self, vertex, rev=None, check_rev=True):
+ """Return a vertex document.
+
+ :param vertex: Vertex document ID or body with "_id" field.
+ :type vertex: str | unicode | dict
+ :param rev: Expected document revision. Overrides the value of "_rev"
+ field in **vertex** if present.
+ :type rev: str | unicode
+ :param check_rev: If set to True, revision of **vertex** (if given) is
+ compared against the revision of target vertex document.
+ :type check_rev: bool
+ :return: Vertex document or None if not found.
+ :rtype: dict | None
+ :raise arango.exceptions.DocumentGetError: If retrieval fails.
+ :raise arango.exceptions.DocumentRevisionError: If revisions mismatch.
+ """
+ return self._get_col_by_vertex(vertex).get(vertex, rev, check_rev)
+
+ def insert_vertex(self, collection, vertex, sync=None, silent=False):
+ """Insert a new vertex document.
+
+ :param collection: Vertex collection name.
+ :type collection: str | unicode
+ :param vertex: New vertex document to insert. If it has "_key" or "_id"
+ field, its value is used as key of the new vertex (otherwise it is
+ auto-generated). Any "_rev" field is ignored.
+ :type vertex: dict
+ :param sync: Block until operation is synchronized to disk.
+ :type sync: bool
+ :param silent: If set to True, no document metadata is returned. This
+ can be used to save resources.
+ :type silent: bool
+ :return: Document metadata (e.g. document key, revision) or True if
+ parameter **silent** was set to True.
+ :rtype: bool | dict
+ :raise arango.exceptions.DocumentInsertError: If insert fails.
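+
+ Example (a sketch; assumes the graph has a vertex collection named
+ "students"):
+
+ .. code-block:: python
+
+ graph.insert_vertex('students', {'_key': 'jane', 'age': 21})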
+ """
+ return self.vertex_collection(collection).insert(vertex, sync, silent)
+
+ def update_vertex(self,
+ vertex,
+ check_rev=True,
+ keep_none=True,
+ sync=None,
+ silent=False):
+ """Update a vertex document.
+
+ :param vertex: Partial or full vertex document with updated values. It
+ must contain the "_id" field.
+ :type vertex: dict
+ :param check_rev: If set to True, revision of **vertex** (if given) is
+ compared against the revision of target vertex document.
+ :type check_rev: bool
+ :param keep_none: If set to True, fields with value None are retained
+ in the document. If set to False, they are removed completely.
+ :type keep_none: bool
+ :param sync: Block until operation is synchronized to disk.
+ :type sync: bool
+ :param silent: If set to True, no document metadata is returned. This
+ can be used to save resources.
+ :type silent: bool
+ :return: Document metadata (e.g. document key, revision) or True if
+ parameter **silent** was set to True.
+ :rtype: bool | dict
+ :raise arango.exceptions.DocumentUpdateError: If update fails.
+ :raise arango.exceptions.DocumentRevisionError: If revisions mismatch.
+ """
+ return self._get_col_by_vertex(vertex).update(
+ vertex=vertex,
+ check_rev=check_rev,
+ keep_none=keep_none,
+ sync=sync,
+ silent=silent
+ )
+
+ def replace_vertex(self, vertex, check_rev=True, sync=None, silent=False):
+ """Replace a vertex document.
+
+ :param vertex: New vertex document to replace the old one with. It must
+ contain the "_id" field.
+ :type vertex: dict
+ :param check_rev: If set to True, revision of **vertex** (if given) is
+ compared against the revision of target vertex document.
+ :type check_rev: bool
+ :param sync: Block until operation is synchronized to disk.
+ :type sync: bool
+ :param silent: If set to True, no document metadata is returned. This
+ can be used to save resources.
+ :type silent: bool
+ :return: Document metadata (e.g. document key, revision) or True if
+ parameter **silent** was set to True.
+ :rtype: bool | dict
+ :raise arango.exceptions.DocumentReplaceError: If replace fails.
+ :raise arango.exceptions.DocumentRevisionError: If revisions mismatch.
+ """
+ return self._get_col_by_vertex(vertex).replace(
+ vertex=vertex,
+ check_rev=check_rev,
+ sync=sync,
+ silent=silent
+ )
+
+ def delete_vertex(self,
+ vertex,
+ rev=None,
+ check_rev=True,
+ ignore_missing=False,
+ sync=None):
+ """Delete a vertex document.
+
+ :param vertex: Vertex document ID or body with "_id" field.
+ :type vertex: str | unicode | dict
+ :param rev: Expected document revision. Overrides the value of "_rev"
+ field in **vertex** if present.
+ :type rev: str | unicode
+ :param check_rev: If set to True, revision of **vertex** (if given) is
+ compared against the revision of target vertex document.
+ :type check_rev: bool
+ :param ignore_missing: Do not raise an exception on missing document.
+ This parameter has no effect in transactions where an exception is
+ always raised on failures.
+ :type ignore_missing: bool
+ :param sync: Block until operation is synchronized to disk.
+ :type sync: bool
+ :return: True if vertex was deleted successfully, False if vertex was
+ not found and **ignore_missing** was set to True (does not apply in
+ transactions).
+ :rtype: bool
+ :raise arango.exceptions.DocumentDeleteError: If delete fails.
+ :raise arango.exceptions.DocumentRevisionError: If revisions mismatch.
+ """
+ return self._get_col_by_vertex(vertex).delete(
+ vertex=vertex,
+ rev=rev,
+ check_rev=check_rev,
+ ignore_missing=ignore_missing,
+ sync=sync
+ )
+
+ ###################
+ # Edge Management #
+ ###################
+
+ def has_edge(self, edge, rev=None, check_rev=True):
+ """Check if the given edge document exists in the graph.
+
+ :param edge: Edge document ID or body with "_id" field.
+ :type edge: str | unicode | dict
+ :param rev: Expected document revision. Overrides the value of "_rev"
+ field in **edge** if present.
+ :type rev: str | unicode
+ :param check_rev: If set to True, revision of **edge** (if given) is
+ compared against the revision of target edge document.
+ :type check_rev: bool
+ :return: True if edge document exists, False otherwise.
+ :rtype: bool
+ :raise arango.exceptions.DocumentInError: If check fails.
+ :raise arango.exceptions.DocumentRevisionError: If revisions mismatch.
+ """
+ return self._get_col_by_edge(edge).has(edge, rev, check_rev)
+
+ def edge(self, edge, rev=None, check_rev=True):
+ """Return an edge document.
+
+ :param edge: Edge document ID or body with "_id" field.
+ :type edge: str | unicode | dict
+ :param rev: Expected document revision. Overrides the value of "_rev"
+ field in **edge** if present.
+ :type rev: str | unicode
+ :param check_rev: If set to True, revision of **edge** (if given) is
+ compared against the revision of target edge document.
+ :type check_rev: bool
+ :return: Edge document or None if not found.
+ :rtype: dict | None
+ :raise arango.exceptions.DocumentGetError: If retrieval fails.
+ :raise arango.exceptions.DocumentRevisionError: If revisions mismatch.
+ """
+ return self._get_col_by_edge(edge).get(edge, rev, check_rev)
+
+ def insert_edge(self, collection, edge, sync=None, silent=False):
+ """Insert a new edge document.
+
+ :param collection: Edge collection name.
+ :type collection: str | unicode
+ :param edge: New edge document to insert. It must contain "_from" and
+ "_to" fields. If it has "_key" or "_id" field, its value is used
+ as key of the new edge document (otherwise it is auto-generated).
+ Any "_rev" field is ignored.
+ :type edge: dict
+ :param sync: Block until operation is synchronized to disk.
+ :type sync: bool
+ :param silent: If set to True, no document metadata is returned. This
+ can be used to save resources.
+ :type silent: bool
+ :return: Document metadata (e.g. document key, revision) or True if
+ parameter **silent** was set to True.
+ :rtype: bool | dict
+ :raise arango.exceptions.DocumentInsertError: If insert fails.
+ """
+ return self.edge_collection(collection).insert(edge, sync, silent)
+
+ def update_edge(self,
+ edge,
+ check_rev=True,
+ keep_none=True,
+ sync=None,
+ silent=False):
+ """Update an edge document.
+
+ :param edge: Partial or full edge document with updated values. It must
+ contain the "_id" field.
+ :type edge: dict
+ :param check_rev: If set to True, revision of **edge** (if given) is
+ compared against the revision of target edge document.
+ :type check_rev: bool
+ :param keep_none: If set to True, fields with value None are retained
+ in the document. If set to False, they are removed completely.
+ :type keep_none: bool
+ :param sync: Block until operation is synchronized to disk.
+ :type sync: bool
+ :param silent: If set to True, no document metadata is returned. This
+ can be used to save resources.
+ :type silent: bool
+ :return: Document metadata (e.g. document key, revision) or True if
+ parameter **silent** was set to True.
+ :rtype: bool | dict
+ :raise arango.exceptions.DocumentUpdateError: If update fails.
+ :raise arango.exceptions.DocumentRevisionError: If revisions mismatch.
+ """
+ return self._get_col_by_edge(edge).update(
+ edge=edge,
+ check_rev=check_rev,
+ keep_none=keep_none,
+ sync=sync,
+ silent=silent
+ )
- return request, handler
+
+ def replace_edge(self, edge, check_rev=True, sync=None, silent=False):
+ """Replace an edge document.
+
+ :param edge: New edge document to replace the old one with. It must
+ contain the "_id" field. It must also contain the "_from" and "_to"
+ fields.
+ :type edge: dict
+ :param check_rev: If set to True, revision of **edge** (if given) is
+ compared against the revision of target edge document.
+ :type check_rev: bool
+ :param sync: Block until operation is synchronized to disk.
+ :type sync: bool
+ :param silent: If set to True, no document metadata is returned. This
+ can be used to save resources.
+ :type silent: bool
+ :return: Document metadata (e.g. document key, revision) or True if
+ parameter **silent** was set to True.
+ :rtype: bool | dict
+ :raise arango.exceptions.DocumentReplaceError: If replace fails.
+ :raise arango.exceptions.DocumentRevisionError: If revisions mismatch.
+ """
+ return self._get_col_by_edge(edge).replace(
+ edge=edge,
+ check_rev=check_rev,
+ sync=sync,
+ silent=silent
+ )
+
+ def delete_edge(self,
+ edge,
+ rev=None,
+ check_rev=True,
+ ignore_missing=False,
+ sync=None):
+ """Delete an edge document.
+
+ :param edge: Edge document ID or body with "_id" field.
+ :type edge: str | unicode | dict
+ :param rev: Expected document revision. Overrides the value of "_rev"
+ field in **edge** if present.
+ :type rev: str | unicode
+ :param check_rev: If set to True, revision of **edge** (if given) is
+ compared against the revision of target edge document.
+ :type check_rev: bool
+ :param ignore_missing: Do not raise an exception on missing document.
+ This parameter has no effect in transactions, where an exception is
+ always raised on failures.
+ :type ignore_missing: bool
+ :param sync: Block until operation is synchronized to disk.
+ :type sync: bool
+ :return: True if edge was deleted successfully, False if edge was not
+ found and **ignore_missing** was set to True (does not apply in
+ transactions).
+ :rtype: bool
+ :raise arango.exceptions.DocumentDeleteError: If delete fails.
+ :raise arango.exceptions.DocumentRevisionError: If revisions mismatch.
+ """
+ return self._get_col_by_edge(edge).delete(
+ edge=edge,
+ rev=rev,
+ check_rev=check_rev,
+ ignore_missing=ignore_missing,
+ sync=sync
+ )
+
+ def link(self,
+ collection,
+ from_vertex,
+ to_vertex,
+ data=None,
+ sync=None,
+ silent=False):
+ """Insert a new edge document linking the given vertices.
+
+ :param collection: Edge collection name.
+ :type collection: str | unicode
+ :param from_vertex: "From" vertex document ID or body with "_id" field.
+ :type from_vertex: str | unicode | dict
+ :param to_vertex: "To" vertex document ID or body with "_id" field.
+ :type to_vertex: str | unicode | dict
+ :param data: Any extra data for the new edge document. If it has "_key"
+ or "_id" field, its value is used as key of the new edge document
+ (otherwise it is auto-generated).
+ :type data: dict
+ :param sync: Block until operation is synchronized to disk.
+ :type sync: bool
+ :param silent: If set to True, no document metadata is returned. This
+ can be used to save resources.
+ :type silent: bool
+ :return: Document metadata (e.g. document key, revision) or True if
+ parameter **silent** was set to True.
+ :rtype: bool | dict
+ :raise arango.exceptions.DocumentInsertError: If insert fails.
+ """
+ return self.edge_collection(collection).link(
+ from_vertex=from_vertex,
+ to_vertex=to_vertex,
+ data=data,
+ sync=sync,
+ silent=silent
+ )
+
+ def edges(self, collection, vertex, direction=None):
+ """Return the edge documents coming in and/or out of given vertex.
+
+ :param collection: Edge collection name.
+ :type collection: str | unicode
+ :param vertex: Vertex document ID or body with "_id" field.
+ :type vertex: str | unicode | dict
+ :param direction: The direction of the edges. Allowed values are "in"
+ and "out". If not set, edges in both directions are returned.
+ :type direction: str | unicode
+ :return: List of edges and statistics.
+ :rtype: dict
+ :raise arango.exceptions.EdgeListError: If retrieval fails.
+ """
+ return self.edge_collection(collection).edges(vertex, direction)
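
Taken together, the new vertex and edge methods let callers do full CRUD through the graph wrapper without touching the underlying collections directly. A minimal usage sketch against the methods above (the database, graph, and collection names, i.e. "test", "school", "teach", "teachers", "lectures", are hypothetical, following the README examples; the "edges" key in the result is assumed per the ArangoDB edges API):

    from arango import ArangoClient

    client = ArangoClient(protocol='http', host='localhost', port=8529)
    db = client.db('test', username='root', password='passwd')
    graph = db.graph('school')

    # Insert an edge document linking two existing vertices.
    graph.insert_edge('teach', {
        '_from': 'teachers/jon',
        '_to': 'lectures/CSC101'
    })

    # Equivalently, let link() fill in the "_from" and "_to" fields.
    graph.link('teach', 'teachers/jon', 'lectures/MAT101', data={'online': True})

    # Fetch the edges going out of a vertex, then delete one by ID.
    result = graph.edges('teach', 'teachers/jon', direction='out')
    graph.delete_edge(result['edges'][0]['_id'], ignore_missing=True)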
diff --git a/arango/http.py b/arango/http.py
new file mode 100644
index 00000000..498f5a24
--- /dev/null
+++ b/arango/http.py
@@ -0,0 +1,92 @@
+from __future__ import absolute_import, unicode_literals
+
+__all__ = ['HTTPClient', 'DefaultHTTPClient']
+
+from abc import ABCMeta, abstractmethod
+
+import requests
+
+from arango.response import Response
+
+
+class HTTPClient(object): # pragma: no cover
+ """Abstract base class for HTTP clients."""
+
+ __metaclass__ = ABCMeta
+
+ @abstractmethod
+ def send_request(self,
+ method,
+ url,
+ headers=None,
+ params=None,
+ data=None,
+ auth=None):
+ """Send an HTTP request.
+
+ This method must be overridden by the user.
+
+ :param method: HTTP method in lowercase (e.g. "post").
+ :type method: str | unicode
+ :param url: Request URL.
+ :type url: str | unicode
+ :param headers: Request headers.
+ :type headers: dict
+ :param params: URL (query) parameters.
+ :type params: dict
+ :param data: Request payload.
+ :type data: str | unicode | bool | int | list | dict
+ :param auth: Username and password.
+ :type auth: tuple
+ :returns: HTTP response.
+ :rtype: arango.response.Response
+ """
+ raise NotImplementedError
+
+
+class DefaultHTTPClient(HTTPClient):
+ """Default HTTP client implementation."""
+
+ def __init__(self):
+ self._session = requests.Session()
+
+ def send_request(self,
+ method,
+ url,
+ params=None,
+ data=None,
+ headers=None,
+ auth=None):
+ """Send an HTTP request.
+
+ :param method: HTTP method in lowercase (e.g. "post").
+ :type method: str | unicode
+ :param url: Request URL.
+ :type url: str | unicode
+ :param headers: Request headers.
+ :type headers: dict
+ :param params: URL (query) parameters.
+ :type params: dict
+ :param data: Request payload.
+ :type data: str | unicode | bool | int | list | dict
+ :param auth: Username and password.
+ :type auth: tuple
+ :returns: HTTP response.
+ :rtype: arango.response.Response
+ """
+ raw_resp = self._session.request(
+ method=method,
+ url=url,
+ params=params,
+ data=data,
+ headers=headers,
+ auth=auth,
+ )
+ return Response(
+ method=raw_resp.request.method,
+ url=raw_resp.url,
+ headers=raw_resp.headers,
+ status_code=raw_resp.status_code,
+ status_text=raw_resp.reason,
+ raw_body=raw_resp.text,
+ )
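
Because HTTPClient is now a single abstract method, swapping in a custom transport only means implementing send_request and returning an arango.response.Response. A sketch of one possible client that adds a fixed timeout and connection retries on top of requests (the timeout and retry policy are illustrative assumptions, not part of this changeset):

    import requests
    from requests.adapters import HTTPAdapter

    from arango.http import HTTPClient
    from arango.response import Response


    class TimeoutHTTPClient(HTTPClient):
        """HTTP client with a fixed timeout and connection retries."""

        def __init__(self, timeout=5, retries=3):
            self._timeout = timeout
            self._session = requests.Session()
            self._session.mount('http://', HTTPAdapter(max_retries=retries))

        def send_request(self, method, url, headers=None, params=None,
                         data=None, auth=None):
            # Same wire format as DefaultHTTPClient, plus a timeout.
            raw_resp = self._session.request(
                method=method,
                url=url,
                params=params,
                data=data,
                headers=headers,
                auth=auth,
                timeout=self._timeout,
            )
            return Response(
                method=raw_resp.request.method,
                url=raw_resp.url,
                headers=raw_resp.headers,
                status_code=raw_resp.status_code,
                status_text=raw_resp.reason,
                raw_body=raw_resp.text,
            )

An instance of such a class would then be handed to ArangoClient via its http_client parameter.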
diff --git a/arango/http_clients/__init__.py b/arango/http_clients/__init__.py
deleted file mode 100644
index 2e12fd49..00000000
--- a/arango/http_clients/__init__.py
+++ /dev/null
@@ -1 +0,0 @@
-from arango.http_clients.default import DefaultHTTPClient
diff --git a/arango/http_clients/base.py b/arango/http_clients/base.py
deleted file mode 100644
index d851ce13..00000000
--- a/arango/http_clients/base.py
+++ /dev/null
@@ -1,120 +0,0 @@
-from abc import ABCMeta, abstractmethod
-
-
-class BaseHTTPClient(object): # pragma: no cover
- """Base class for ArangoDB clients.
-
- The methods must return an instance of :class:`arango.response.Response`.
- """
-
- __metaclass__ = ABCMeta
-
- @abstractmethod
- def head(self, url, params=None, headers=None, auth=None):
- """Execute an HTTP **HEAD** method.
-
- :param url: request URL
- :type url: str | unicode
- :param params: request parameters
- :type params: dict
- :param headers: request headers
- :type headers: dict
- :param auth: username and password tuple
- :type auth: tuple
- :returns: ArangoDB HTTP response object
- :rtype: arango.response.Response
- """
- raise NotImplementedError
-
- @abstractmethod
- def get(self, url, params=None, headers=None, auth=None):
- """Execute an HTTP **GET** method.
-
- :param url: request URL
- :type url: str | unicode
- :param params: request parameters
- :type params: dict
- :param headers: request headers
- :type headers: dict
- :param auth: username and password tuple
- :type auth: tuple
- :returns: ArangoDB HTTP response object
- :rtype: arango.response.Response
- """
- raise NotImplementedError
-
- @abstractmethod
- def put(self, url, data, params=None, headers=None, auth=None):
- """Execute an HTTP **PUT** method.
-
- :param url: request URL
- :type url: str | unicode
- :param data: request payload
- :type data: str | unicode | dict
- :param params: request parameters
- :type params: dict
- :param headers: request headers
- :type headers: dict
- :param auth: username and password tuple
- :type auth: tuple
- :returns: ArangoDB HTTP response object
- :rtype: arango.response.Response
- """
- raise NotImplementedError
-
- @abstractmethod
- def post(self, url, data, params=None, headers=None, auth=None):
- """Execute an HTTP **POST** method.
-
- :param url: request URL
- :type url: str | unicode
- :param data: request payload
- :type data: str | unicode | dict
- :param params: request parameters
- :type params: dict
- :param headers: request headers
- :type headers: dict
- :param auth: username and password tuple
- :type auth: tuple
- :returns: ArangoDB HTTP response object
- :rtype: arango.response.Response
- """
- raise NotImplementedError
-
- @abstractmethod
- def patch(self, url, data, params=None, headers=None, auth=None):
- """Execute an HTTP **PATCH** method.
-
- :param url: request URL
- :type url: str | unicode
- :param data: request payload
- :type data: str | unicode | dict
- :param params: request parameters
- :type params: dict
- :param headers: request headers
- :type headers: dict
- :param auth: username and password tuple
- :type auth: tuple
- :returns: ArangoDB HTTP response object
- :rtype: arango.response.Response
- """
- raise NotImplementedError
-
- @abstractmethod
- def delete(self, url, data=None, params=None, headers=None, auth=None):
- """Execute an HTTP **DELETE** method.
-
- :param url: request URL
- :type url: str | unicode
- :param data: request payload
- :type data: str | unicode | dict
- :param params: request parameters
- :type params: dict
- :param headers: request headers
- :type headers: dict
- :param auth: username and password tuple
- :type auth: tuple
- :returns: ArangoDB HTTP response object
- :rtype: arango.response.Response
- """
- raise NotImplementedError
diff --git a/arango/http_clients/default.py b/arango/http_clients/default.py
deleted file mode 100644
index 25ed7f07..00000000
--- a/arango/http_clients/default.py
+++ /dev/null
@@ -1,213 +0,0 @@
-from __future__ import absolute_import, unicode_literals
-
-import requests
-
-from arango.response import Response
-from arango.http_clients.base import BaseHTTPClient
-
-
-class DefaultHTTPClient(BaseHTTPClient):
- """Session based HTTP client for ArangoDB using the requests_ library.
-
- .. _requests: http://docs.python-requests.org/en/master/
- """
-
- def __init__(self, use_session=True, check_cert=True):
- """Initialize the session."""
- if use_session:
- self._session = requests.Session()
- else:
- self._session = requests
- self._check_cert = check_cert
-
- def head(self, url, params=None, headers=None, auth=None):
- """Execute an HTTP **HEAD** method.
-
- :param url: request URL
- :type url: str | unicode
- :param params: request parameters
- :type params: dict
- :param headers: request headers
- :type headers: dict
- :param auth: username and password tuple
- :type auth: tuple
- :returns: ArangoDB HTTP response object
- :rtype: arango.response.Response
- """
- res = self._session.head(
- url=url,
- params=params,
- headers=headers,
- auth=auth,
- verify=self._check_cert
- )
- return Response(
- url=url,
- method="head",
- headers=res.headers,
- http_code=res.status_code,
- http_text=res.reason,
- body=res.text
- )
-
- def get(self, url, params=None, headers=None, auth=None):
- """Execute an HTTP **GET** method.
-
- :param url: request URL
- :type url: str | unicode
- :param params: request parameters
- :type params: dict
- :param headers: request headers
- :type headers: dict
- :param auth: username and password tuple
- :type auth: tuple
- :returns: ArangoDB HTTP response object
- :rtype: arango.response.Response
- """
- res = self._session.get(
- url=url,
- params=params,
- headers=headers,
- auth=auth,
- verify=self._check_cert
- )
- return Response(
- url=url,
- method="get",
- headers=res.headers,
- http_code=res.status_code,
- http_text=res.reason,
- body=res.text
- )
-
- def put(self, url, data, params=None, headers=None, auth=None):
- """Execute an HTTP **PUT** method.
-
- :param url: request URL
- :type url: str | unicode
- :param data: request payload
- :type data: str | unicode | dict
- :param params: request parameters
- :type params: dict
- :param headers: request headers
- :type headers: dict
- :param auth: username and password tuple
- :type auth: tuple
- :returns: ArangoDB HTTP response object
- :rtype: arango.response.Response
- """
- res = self._session.put(
- url=url,
- data=data,
- params=params,
- headers=headers,
- auth=auth,
- verify=self._check_cert
- )
- return Response(
- url=url,
- method="put",
- headers=res.headers,
- http_code=res.status_code,
- http_text=res.reason,
- body=res.text
- )
-
- def post(self, url, data, params=None, headers=None, auth=None):
- """Execute an HTTP **POST** method.
-
- :param url: request URL
- :type url: str | unicode
- :param data: request payload
- :type data: str | unicode | dict
- :param params: request parameters
- :type params: dict
- :param headers: request headers
- :type headers: dict
- :param auth: username and password tuple
- :type auth: tuple
- :returns: ArangoDB HTTP response object
- :rtype: arango.response.Response
- """
- res = self._session.post(
- url=url,
- data=data,
- params=params,
- headers=headers,
- auth=auth,
- verify=self._check_cert
- )
- return Response(
- url=url,
- method="post",
- headers=res.headers,
- http_code=res.status_code,
- http_text=res.reason,
- body=res.text
- )
-
- def patch(self, url, data, params=None, headers=None, auth=None):
- """Execute an HTTP **PATCH** method.
-
- :param url: request URL
- :type url: str | unicode
- :param data: request payload
- :type data: str | unicode | dict
- :param params: request parameters
- :type params: dict
- :param headers: request headers
- :type headers: dict
- :param auth: username and password tuple
- :type auth: tuple
- :returns: ArangoDB HTTP response object
- :rtype: arango.response.Response
- """
- res = self._session.patch(
- url=url,
- data=data,
- params=params,
- headers=headers,
- auth=auth,
- verify=self._check_cert
- )
- return Response(
- url=url,
- method="patch",
- headers=res.headers,
- http_code=res.status_code,
- http_text=res.reason,
- body=res.text
- )
-
- def delete(self, url, data=None, params=None, headers=None, auth=None):
- """Execute an HTTP **DELETE** method.
-
- :param url: request URL
- :type url: str | unicode
- :param data: request payload
- :type data: str | unicode | dict
- :param params: request parameters
- :type params: dict
- :param headers: request headers
- :type headers: dict
- :param auth: username and password tuple
- :type auth: tuple
- :returns: ArangoDB HTTP response object
- :rtype: arango.response.Response
- """
- res = self._session.delete(
- url=url,
- data=data,
- params=params,
- headers=headers,
- auth=auth,
- verify=self._check_cert
- )
- return Response(
- url=url,
- method="delete",
- headers=res.headers,
- http_code=res.status_code,
- http_text=res.reason,
- body=res.text
- )
diff --git a/arango/job.py b/arango/job.py
new file mode 100644
index 00000000..b9e468cc
--- /dev/null
+++ b/arango/job.py
@@ -0,0 +1,292 @@
+from __future__ import absolute_import, unicode_literals
+
+from uuid import uuid4
+
+from arango.exceptions import (
+ AsyncJobCancelError,
+ AsyncJobStatusError,
+ AsyncJobResultError,
+ AsyncJobClearError,
+ BatchJobResultError,
+ TransactionJobResultError,
+)
+from arango.request import Request
+
+
+class Job(object): # pragma: no cover
+ """Base class for API execution jobs.
+
+ Jobs are used to track the progress of API executions and to retrieve
+ their results.
+ """
+
+ @property
+ def id(self):
+ """Return the job ID.
+
+ :return: Job ID.
+ :rtype: str | unicode
+ """
+ raise NotImplementedError
+
+ def status(self):
+ """Return the job status.
+
+ :return: Job status.
+ :rtype: str | unicode
+ """
+ raise NotImplementedError
+
+ def result(self):
+ """Return the job result (if available).
+
+ :return: Job result.
+ :rtype: str | unicode | bool | int | list | dict
+ :raise arango.exceptions.ArangoError: If result was an error.
+ """
+ raise NotImplementedError
+
+
+class AsyncJob(Job):
+ """Job for tracking and retrieving result of an async execution.
+
+ :param connection: HTTP connection.
+ :type connection: arango.connection.Connection
+ :param job_id: Async job ID.
+ :type job_id: str | unicode
+ :param response_handler: HTTP response handler.
+ :type response_handler: callable
+ """
+
+ __slots__ = ['_conn', '_id', '_response_handler']
+
+ def __init__(self, connection, job_id, response_handler):
+ self._conn = connection
+ self._id = job_id
+ self._response_handler = response_handler
+
+ def __repr__(self):
+ return '<AsyncJob {}>'.format(self._id)
+
+ @property
+ def id(self):
+ """Return the async job ID.
+
+ :return: Async job ID.
+ :rtype: str | unicode
+ """
+ return self._id
+
+ def status(self):
+ """Return the async job status from server.
+
+ Once a job result is retrieved via the :func:`arango.job.AsyncJob.result`
+ method, it is deleted from the server and subsequent status queries will
+ fail.
+
+ :return: Async job status. Possible values are "pending" (job is still
+ in queue), "done" (job finished or raised an error), or "cancelled"
+ (job was cancelled before completion).
+ :rtype: str | unicode
+ :raise arango.exceptions.AsyncJobStatusError: If retrieval fails.
+ """
+ request = Request(
+ method='get',
+ endpoint='/_api/job/{}'.format(self._id)
+ )
+ resp = self._conn.send_request(request)
+ if resp.status_code == 204:
+ return 'pending'
+ elif resp.is_success:
+ return 'done'
+ elif resp.error_code == 404:
+ error_message = 'job {} not found'.format(self._id)
+ raise AsyncJobStatusError(resp, request, error_message)
+ else:
+ raise AsyncJobStatusError(resp, request)
+
+ def result(self):
+ """Return the async job result from server.
+
+ If the job raised an exception, it is propagated up at this point.
+
+ Once the job result is retrieved, it is deleted from the server and
+ subsequent queries for the result will fail.
+
+ :return: Async job result.
+ :rtype: str | unicode | bool | int | list | dict
+ :raise arango.exceptions.ArangoError: If the job raised an exception.
+ :raise arango.exceptions.AsyncJobResultError: If retrieval fails.
+ """
+ request = Request(
+ method='put',
+ endpoint='/_api/job/{}'.format(self._id)
+ )
+ resp = self._conn.send_request(request)
+ headers = resp.headers
+ if 'X-Arango-Async-Id' in headers or 'x-arango-async-id' in headers:
+ return self._response_handler(resp)
+ if resp.status_code == 204:
+ error_message = 'job {} not done'.format(self._id)
+ raise AsyncJobResultError(resp, request, error_message)
+ elif resp.error_code == 404:
+ error_message = 'job {} not found'.format(self._id)
+ raise AsyncJobResultError(resp, request, error_message)
+ else:
+ raise AsyncJobResultError(resp, request)
+
+ def cancel(self, ignore_missing=False):
+ """Cancel the async job.
+
+ An async job cannot be cancelled once it is taken out of the queue.
+
+ :param ignore_missing: Do not raise an exception on missing job.
+ :type ignore_missing: bool
+ :return: True if job was cancelled successfully, False if the job
+ was not found but **ignore_missing** was set to True.
+ :rtype: bool
+ :raise arango.exceptions.AsyncJobCancelError: If cancel fails.
+ """
+ request = Request(
+ method='put',
+ endpoint='/_api/job/{}/cancel'.format(self._id)
+ )
+ resp = self._conn.send_request(request)
+ if resp.status_code == 200:
+ return True
+ elif resp.error_code == 404:
+ if ignore_missing:
+ return False
+ error_message = 'job {} not found'.format(self._id)
+ raise AsyncJobCancelError(resp, request, error_message)
+ else:
+ raise AsyncJobCancelError(resp, request)
+
+ def clear(self, ignore_missing=False):
+ """Delete the job result from the server.
+
+ :param ignore_missing: Do not raise an exception on missing job.
+ :type ignore_missing: bool
+ :return: True if result was deleted successfully, False if the job
+ was not found but **ignore_missing** was set to True.
+ :rtype: bool
+ :raise arango.exceptions.AsyncJobClearError: If delete fails.
+ """
+ request = Request(
+ method='delete',
+ endpoint='/_api/job/{}'.format(self._id)
+ )
+ resp = self._conn.send_request(request)
+ if resp.is_success:
+ return True
+ elif resp.error_code == 404:
+ if ignore_missing:
+ return False
+ error_message = 'job {} not found'.format(self._id)
+ raise AsyncJobClearError(resp, request, error_message)
+ else:
+ raise AsyncJobClearError(resp, request)
+
+
+class BatchJob(Job):
+ """Job for tracking and retrieving result of batch execution.
+
+ :param response_handler: HTTP response handler.
+ :type response_handler: callable
+ """
+
+ __slots__ = ['_id', '_status', '_response', '_response_handler']
+
+ def __init__(self, response_handler):
+ self._id = uuid4().hex
+ self._status = 'pending'
+ self._response = None
+ self._response_handler = response_handler
+
+ def __repr__(self):
+ return '<BatchJob {}>'.format(self._id)
+
+ @property
+ def id(self):
+ """Return the batch job ID.
+
+ :return: Batch job ID.
+ :rtype: str | unicode
+ """
+ return self._id
+
+ def status(self):
+ """Return the batch job status.
+
+ :return: Batch job status. Possible values are "pending" (job is still
+ waiting for batch to be committed), or "done" (batch was committed
+ and the job is updated with the result).
+ :rtype: str | unicode
+ """
+ return self._status
+
+ def result(self):
+ """Return the batch job result.
+
+ If the job raised an exception, it is propagated up at this point.
+
+ :return: Batch job result.
+ :rtype: str | unicode | bool | int | list | dict
+ :raise arango.exceptions.ArangoError: If the job raised an exception.
+ :raise arango.exceptions.BatchJobResultError: If job result is not
+ available (i.e. batch is not committed yet).
+ """
+ if self._status == 'pending':
+ raise BatchJobResultError('result not available yet')
+ return self._response_handler(self._response)
+
+
+class TransactionJob(Job):
+ """Transaction API execution job.
+
+ :param response_handler: HTTP response handler.
+ :type response_handler: callable
+ """
+
+ __slots__ = ['_id', '_status', '_response', '_response_handler']
+
+ def __init__(self, response_handler):
+ self._id = uuid4().hex
+ self._status = 'pending'
+ self._response = None
+ self._response_handler = response_handler
+
+ def __repr__(self):
+ return '<TransactionJob {}>'.format(self._id)
+
+ @property
+ def id(self):
+ """Return the transaction job ID.
+
+ :return: Transaction job ID.
+ :rtype: str | unicode
+ """
+ return self._id
+
+ def status(self):
+ """Return the transaction job status.
+
+ :return: Transaction job status. Possible values are "pending" (job is
+ waiting for transaction to be committed, or transaction failed and
+ job is orphaned), or "done" (transaction was committed and job is
+ updated with the result).
+ :rtype: str | unicode
+ """
+ return self._status
+
+ def result(self):
+ """Return the transaction job result.
+
+ :return: Transaction job result.
+ :rtype: str | unicode | bool | int | list | dict
+ :raise arango.exceptions.ArangoError: If the job raised an exception.
+ :raise arango.exceptions.TransactionJobResultError: If job result is
+ not available (i.e. transaction is not committed yet or failed).
+ """
+ if self._status == 'pending':
+ raise TransactionJobResultError('result not available yet')
+ return self._response_handler(self._response)
diff --git a/arango/pregel.py b/arango/pregel.py
new file mode 100644
index 00000000..4e7d51b7
--- /dev/null
+++ b/arango/pregel.py
@@ -0,0 +1,147 @@
+from __future__ import absolute_import, unicode_literals
+
+__all__ = ['Pregel']
+
+from arango.api import APIWrapper
+from arango.exceptions import (
+ PregelJobGetError,
+ PregelJobCreateError,
+ PregelJobDeleteError
+)
+from arango.request import Request
+
+
+class Pregel(APIWrapper):
+ """Pregel API wrapper.
+
+ :param connection: HTTP connection.
+ :type connection: arango.connection.Connection
+ :param executor: API executor.
+ :type executor: arango.executor.Executor
+ """
+
+ def __init__(self, connection, executor):
+ super(Pregel, self).__init__(connection, executor)
+
+ def __repr__(self):
+ return '<Pregel in {}>'.format(self._conn.db_name)
+
+ def job(self, job_id):
+ """Return the details of a Pregel job.
+
+ :param job_id: Pregel job ID.
+ :type job_id: int
+ :return: Details of the Pregel job.
+ :rtype: dict
+ :raise arango.exceptions.PregelJobGetError: If retrieval fails.
+ """
+ request = Request(
+ method='get',
+ endpoint='/_api/control_pregel/{}'.format(job_id)
+ )
+
+ def response_handler(resp):
+ if not resp.is_success:
+ raise PregelJobGetError(resp, request)
+ if 'receivedCount' in resp.body:
+ resp.body['received_count'] = resp.body.pop('receivedCount')
+ if 'sendCount' in resp.body:
+ resp.body['send_count'] = resp.body.pop('sendCount')
+ if 'totalRuntime' in resp.body:
+ resp.body['total_runtime'] = resp.body.pop('totalRuntime')
+ if 'edgeCount' in resp.body: # pragma: no cover
+ resp.body['edge_count'] = resp.body.pop('edgeCount')
+ if 'vertexCount' in resp.body: # pragma: no cover
+ resp.body['vertex_count'] = resp.body.pop('vertexCount')
+ return resp.body
+
+ return self._execute(request, response_handler)
+
+ def create_job(self,
+ graph,
+ algorithm,
+ store=True,
+ max_gss=None,
+ thread_count=None,
+ async_mode=None,
+ result_field=None,
+ algorithm_params=None):
+ """Start a new Pregel job.
+
+ :param graph: Graph name.
+ :type graph: str | unicode
+ :param algorithm: Algorithm (e.g. "pagerank").
+ :type algorithm: str | unicode
+ :param store: If set to True, Pregel engine writes results back to the
+ database. If set to False, results can be queried via AQL.
+ :type store: bool
+ :param max_gss: Max number of global iterations for the algorithm.
+ :type max_gss: int
+ :param thread_count: Number of parallel threads to use per worker.
+ This does not influence the number of threads used to load or store
+ data from the database (it depends on the number of shards).
+ :type thread_count: int
+ :param async_mode: If set to True, algorithms which support async mode
+ run without synchronized global iterations. This might lead to a
+ performance increase if there are load imbalances.
+ :type async_mode: bool
+ :param result_field: If specified, most algorithms will write their
+ results into this field.
+ :type result_field: str | unicode
+ :param algorithm_params: Additional algorithm parameters.
+ :type algorithm_params: dict
+ :return: Pregel job ID.
+ :rtype: int
+ :raise arango.exceptions.PregelJobCreateError: If create fails.
+ """
+ data = {'algorithm': algorithm, 'graphName': graph}
+
+ if algorithm_params is None:
+ algorithm_params = {}
+
+ if store is not None:
+ algorithm_params['store'] = store
+ if max_gss is not None:
+ algorithm_params['maxGSS'] = max_gss
+ if thread_count is not None:
+ algorithm_params['parallelism'] = thread_count
+ if async_mode is not None:
+ algorithm_params['async'] = async_mode
+ if result_field is not None:
+ algorithm_params['resultField'] = result_field
+ if algorithm_params:
+ data['params'] = algorithm_params
+
+ request = Request(
+ method='post',
+ endpoint='/_api/control_pregel',
+ data=data
+ )
+
+ def response_handler(resp):
+ if resp.is_success:
+ return resp.body
+ raise PregelJobCreateError(resp, request)
+
+ return self._execute(request, response_handler)
+
+ def delete_job(self, job_id):
+ """Delete a Pregel job.
+
+ :param job_id: Pregel job ID.
+ :type job_id: int
+ :return: True if Pregel job was deleted successfully.
+ :rtype: bool
+ :raise arango.exceptions.PregelJobDeleteError: If delete fails.
+ """
+ request = Request(
+ method='delete',
+ endpoint='/_api/control_pregel/{}'.format(job_id)
+ )
+
+ def response_handler(resp):
+ if resp.is_success:
+ return True
+ raise PregelJobDeleteError(resp, request)
+
+ return self._execute(request, response_handler)
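
A short sketch of the wrapper in use, assuming it is exposed on the database object as db.pregel and that a graph named "school" exists (both names hypothetical):

    from arango import ArangoClient

    client = ArangoClient(protocol='http', host='localhost', port=8529)
    db = client.db('test', username='root', password='passwd')

    # Start a PageRank run and keep the job ID it returns.
    job_id = db.pregel.create_job(
        graph='school',
        algorithm='pagerank',
        store=False,
        max_gss=100,
        thread_count=1,
        result_field='rank'
    )

    # Inspect progress, then clean the job up when done.
    details = db.pregel.job(job_id)
    print(details['state'])
    db.pregel.delete_job(job_id)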
diff --git a/arango/request.py b/arango/request.py
index 38752a6c..fe98b7c3 100644
--- a/arango/request.py
+++ b/arango/request.py
@@ -1,15 +1,48 @@
from __future__ import absolute_import, unicode_literals
-from json import dumps
+__all__ = ['Request']
-from six import moves
+import json
+
+from six import moves, string_types
class Request(object):
- """ArangoDB API request object.
+ """HTTP request.
+
+ :param method: HTTP method in lowercase (e.g. "post").
+ :type method: str | unicode
+ :param endpoint: API endpoint.
+ :type endpoint: str | unicode
+ :param headers: Request headers.
+ :type headers: dict
+ :param params: URL parameters.
+ :type params: dict
+ :param data: Request payload.
+ :type data: str | unicode | bool | int | list | dict
+ :param command: ArangoSh command.
+ :type command: str | unicode
+ :param read: Names of collections read during transaction.
+ :type read: str | unicode | [str | unicode]
+ :param write: Names of collections written to during transaction.
+ :type write: str | unicode | [str | unicode]
- .. note::
- This class is meant to be used internally only.
+ :ivar method: HTTP method in lowercase (e.g. "post").
+ :vartype method: str | unicode
+ :ivar endpoint: API endpoint.
+ :vartype endpoint: str | unicode
+ :ivar headers: Request headers.
+ :vartype headers: dict
+ :ivar params: URL (query) parameters.
+ :vartype params: dict
+ :ivar data: Request payload.
+ :vartype data: str | unicode | bool | int | list | dict
+ :ivar command: ArangoSh command.
+ :vartype command: str | unicode | None
+ :ivar read: Names of collections read during transaction.
+ :vartype read: str | unicode | [str | unicode] | None
+ :ivar write: Names of collections written to during transaction.
+ :vartype write: str | unicode | [str | unicode] | None
"""
__slots__ = (
@@ -19,6 +52,8 @@ class Request(object):
'params',
'data',
'command',
+ 'read',
+ 'write'
)
def __init__(self,
@@ -27,33 +62,46 @@ def __init__(self,
headers=None,
params=None,
data=None,
- command=None):
+ command=None,
+ read=None,
+ write=None):
self.method = method
self.endpoint = endpoint
self.headers = headers or {}
- self.params = params or {}
- self.data = data
- self.command = command
- @property
- def kwargs(self):
- return {
- 'endpoint': self.endpoint,
- 'headers': self.headers,
- 'params': self.params,
- 'data': self.data,
- }
+ # Insert default headers.
+ self.headers['content-type'] = 'application/json'
+ self.headers['charset'] = 'utf-8'
+
+ # Sanitize URL params.
+ if params is not None:
+ for key, val in params.items():
+ if isinstance(val, bool):
+ params[key] = int(val)
+ self.params = params
+
+ # Normalize the payload.
+ if data is None:
+ self.data = None
+ elif isinstance(data, string_types):
+ self.data = data
+ else:
+ self.data = json.dumps(data)
+
+ # Set the transaction metadata.
+ self.command = command
+ self.read = read
+ self.write = write
- def stringify(self):
+ def __str__(self):
+ """Return the request details in string form."""
path = self.endpoint
if self.params is not None:
- path += "?" + moves.urllib.parse.urlencode(self.params)
- request_string = "{} {} HTTP/1.1".format(self.method, path)
+ path += '?' + moves.urllib.parse.urlencode(self.params)
+ request_strings = ['{} {} HTTP/1.1'.format(self.method, path)]
if self.headers is not None:
- for key, value in self.headers.items():
- request_string += "\r\n{key}: {value}".format(
- key=key, value=value
- )
+ for key, value in sorted(self.headers.items()):
+ request_strings.append('{}: {}'.format(key, value))
if self.data is not None:
- request_string += "\r\n\r\n{}".format(dumps(self.data))
- return request_string
+ request_strings.append('\r\n{}'.format(self.data))
+ return '\r\n'.join(request_strings)
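
Since the constructor now normalizes headers, params, and the payload up front, a Request can be inspected before it is ever sent. A quick illustration of the behaviour defined above (the endpoint and payload are made up):

    from arango.request import Request

    request = Request(
        method='post',
        endpoint='/_api/document/students',
        params={'waitForSync': True},  # bools are coerced to ints
        data={'name': 'Lola'},         # dicts are serialized to JSON
    )

    assert request.params['waitForSync'] == 1
    assert request.data == '{"name": "Lola"}'

    # __str__ renders an HTTP/1.1-style summary with sorted headers:
    #
    #   post /_api/document/students?waitForSync=1 HTTP/1.1
    #   charset: utf-8
    #   content-type: application/json
    #
    #   {"name": "Lola"}
    print(request)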
diff --git a/arango/response.py b/arango/response.py
index 3dcafa21..414df9cc 100644
--- a/arango/response.py
+++ b/arango/response.py
@@ -1,27 +1,46 @@
+from __future__ import absolute_import, unicode_literals
+
+__all__ = ['Response']
+
import json
class Response(object):
- """ArangoDB HTTP response.
+ """HTTP response.
- Overridden methods of :class:`arango.http_clients.base.BaseHTTPClient` must
- return instances of this.
-
- :param method: The HTTP method name (e.g. ``"post"``).
+ :param method: HTTP method in lowercase (e.g. "post").
:type method: str | unicode
- :param url: The request URL
- (e.g. ``"http://localhost:8529/_db/_system/_api/database"``)
+ :param url: API URL.
:type url: str | unicode
- :param headers: A dict-like mapping object containing the HTTP headers.
- Must allow case-insensitive key access.
- :type headers: collections.MutableMapping
- :param http_code: The HTTP status code.
- :type http_code: int
- :param http_text: The HTTP status text. This is used only for printing
- error messages, and has no specification to follow.
- :type http_text: str | unicode
- :param body: The HTTP response body.
- :type body: str | unicode | dict
+ :param headers: Response headers.
+ :type headers: requests.structures.CaseInsensitiveDict | dict
+ :param status_code: Response status code.
+ :type status_code: int
+ :param status_text: Response status text.
+ :type status_text: str | unicode
+ :param raw_body: Raw response body.
+ :type raw_body: str | unicode
+
+ :ivar method: HTTP method in lowercase (e.g. "post").
+ :vartype method: str | unicode
+ :ivar url: API URL.
+ :vartype url: str | unicode
+ :ivar headers: Response headers.
+ :vartype headers: requests.structures.CaseInsensitiveDict | dict
+ :ivar status_code: Response status code.
+ :vartype status_code: int
+ :ivar status_text: Response status text.
+ :vartype status_text: str | unicode
+ :ivar body: JSON-deserialized response body.
+ :vartype body: str | unicode | bool | int | list | dict
+ :ivar raw_body: Raw response body.
+ :vartype raw_body: str | unicode
+ :ivar error_code: Error code from ArangoDB server.
+ :vartype error_code: int
+ :ivar error_message: Error message from ArangoDB server.
+ :vartype error_message: str | unicode
+ :ivar is_success: True if response status code was 2XX and the body
+ carried no ArangoDB error code.
+ :vartype is_success: bool
"""
__slots__ = (
@@ -30,42 +49,40 @@ class Response(object):
'headers',
'status_code',
'status_text',
- 'raw_body',
'body',
+ 'raw_body',
'error_code',
- 'error_message'
+ 'error_message',
+ 'is_success',
)
def __init__(self,
- method=None,
- url=None,
- headers=None,
- http_code=None,
- http_text=None,
- body=None):
+ method,
+ url,
+ headers,
+ status_code,
+ status_text,
+ raw_body):
+ self.method = method.lower()
self.url = url
- self.method = method
self.headers = headers
- self.status_code = http_code
- self.status_text = http_text
- self.raw_body = body
+ self.status_code = status_code
+ self.status_text = status_text
+ self.raw_body = raw_body
+
+ # De-serialize the response body.
try:
- self.body = json.loads(body)
+ self.body = json.loads(raw_body)
except (ValueError, TypeError):
- self.body = body
- if self.body and isinstance(self.body, dict):
+ self.body = raw_body
+
+ # Extract error code and message.
+ if isinstance(self.body, dict):
self.error_code = self.body.get('errorNum')
self.error_message = self.body.get('errorMessage')
else:
self.error_code = None
self.error_message = None
- def update_body(self, new_body):
- return Response(
- url=self.url,
- method=self.method,
- headers=self.headers,
- http_code=self.status_code,
- http_text=self.status_text,
- body=new_body
- )
+ http_ok = 200 <= status_code < 300
+ self.is_success = http_ok and self.error_code is None
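
Because is_success now requires both a 2XX status code and the absence of an ArangoDB error code, a 200 response carrying an error envelope is treated as a failure. A small, self-contained illustration:

    from arango.response import Response

    resp = Response(
        method='GET',
        url='http://localhost:8529/_api/version',
        headers={},
        status_code=200,
        status_text='OK',
        raw_body='{"error": true, "errorNum": 1228, '
                 '"errorMessage": "database not found"}',
    )

    assert resp.method == 'get'       # lowercased by the constructor
    assert resp.error_code == 1228    # extracted from the JSON body
    assert resp.is_success is False   # 2XX alone is not enough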
diff --git a/arango/transaction.py b/arango/transaction.py
deleted file mode 100644
index cbd7c4b3..00000000
--- a/arango/transaction.py
+++ /dev/null
@@ -1,171 +0,0 @@
-from __future__ import absolute_import, unicode_literals
-
-from uuid import uuid4
-
-from arango.collections import Collection
-from arango.connection import Connection
-from arango.utils import HTTP_OK
-from arango.exceptions import TransactionError
-
-
-class Transaction(Connection):
- """ArangoDB transaction object.
-
- API requests made in a transaction are queued in memory and executed as a
- whole in a single HTTP call to ArangoDB server.
-
- :param connection: ArangoDB database connection
- :type connection: arango.connection.Connection
- :param read: the name(s) of the collection(s) to read from
- :type read: str | unicode | list
- :param write: the name(s) of the collection(s) to write to
- :type write: str | unicode | list
- :param sync: wait for the operation to sync to disk
- :type sync: bool
- :param timeout: timeout on the collection locks
- :type timeout: int
- :param commit_on_error: only applicable when *context managers* are used
- to execute the transaction: if ``True``, the requests queued so
- far are committed even if an exception is raised before exiting out of
- the context
- :type commit_on_error: bool
-
- .. note::
- Only writes are possible at the moment in a transaction.
- """
-
- def __init__(self,
- connection,
- read=None,
- write=None,
- sync=None,
- timeout=None,
- commit_on_error=False):
- super(Transaction, self).__init__(
- protocol=connection.protocol,
- host=connection.host,
- port=connection.port,
- username=connection.username,
- password=connection.password,
- http_client=connection.http_client,
- database=connection.database,
- enable_logging=connection.logging_enabled
- )
- self._id = uuid4()
- self._actions = ['db = require("internal").db']
- self._collections = {}
- if read:
- self._collections['read'] = read
- if write:
- self._collections['write'] = write
- self._timeout = timeout
- self._sync = sync
- self._commit_on_error = commit_on_error
- self._type = 'transaction'
-
- def __repr__(self):
- return '<ArangoDB transaction {}>'.format(self._id)
-
- def __enter__(self):
- return self
-
- def __exit__(self, exception, *_):
- if exception is None or self._commit_on_error:
- return self.commit()
-
- @property
- def id(self):
- """Return the UUID of the transaction.
-
- :return: the UUID of the transaction
- :rtype: str | unicode
- """
- return self._id
-
- def handle_request(self, request, handler):
- """Handle the incoming request and response handler.
-
- :param request: the API request queued as part of the transaction, and
- executed only when the current transaction is committed via method
- :func:`arango.batch.BatchExecution.commit`
- :type request: arango.request.Request
- :param handler: the response handler
- :type handler: callable
- """
- if request.command is None:
- raise TransactionError('unsupported method')
- self._actions.append(request.command)
-
- def execute(self, command, params=None, sync=None, timeout=None):
- """Execute raw Javascript code in a transaction.
-
- :param command: the raw Javascript code
- :type command: str | unicode
- :param params: optional arguments passed into the code
- :type params: dict
- :param sync: wait for the operation to sync to disk (overrides the
- value specified during the transaction object instantiation)
- :type sync: bool
- :param timeout: timeout on the collection locks (overrides the value
- value specified during the transaction object instantiation)
- :type timeout: int
- :return: the result of the transaction
- :rtype: dict
- :raises arango.exceptions.TransactionError: if the transaction cannot
- be executed
- """
- data = {'collections': self._collections, 'action': command}
- timeout = self._timeout if timeout is None else timeout
- sync = self._sync if sync is None else sync
-
- if timeout is not None:
- data['lockTimeout'] = timeout
- if sync is not None:
- data['waitForSync'] = sync
- if params is not None:
- data['params'] = params
-
- res = self.post(endpoint='/_api/transaction', data=data)
- if res.status_code not in HTTP_OK:
- raise TransactionError(res)
- return res.body.get('result')
-
- def commit(self):
- """Execute the queued API requests in a single atomic step.
-
- :return: the result of the transaction
- :rtype: dict
- :raises arango.exceptions.TransactionError: if the transaction cannot
- be executed
- """
- try:
- action = ';'.join(self._actions)
- res = self.post(
- endpoint='/_api/transaction',
- data={
- 'collections': self._collections,
- 'action': 'function () {{ {} }}'.format(action)
- },
- params={
- 'lockTimeout': self._timeout,
- 'waitForSync': self._sync,
- }
- )
- if res.status_code not in HTTP_OK:
- raise TransactionError(res)
- return res.body.get('result')
- finally:
- self._actions = ['db = require("internal").db']
-
- def collection(self, name):
- """Return the collection object tailored for transactions.
-
- API requests via the returned object are placed in an in-memory queue
- and committed as a whole in a single HTTP call to the ArangoDB server.
-
- :param name: the name of the collection
- :type name: str | unicode
- :returns: the collection object
- :rtype: arango.collections.Collection
- """
- return Collection(self, name)
diff --git a/arango/utils.py b/arango/utils.py
index 89fa6034..b4098e01 100644
--- a/arango/utils.py
+++ b/arango/utils.py
@@ -1,18 +1,51 @@
from __future__ import absolute_import, unicode_literals
-from json import dumps
+import logging
+from contextlib import contextmanager
-from six import string_types
+from arango.exceptions import DocumentParseError
-# Set of HTTP OK status codes
-HTTP_OK = {200, 201, 202, 203, 204, 205, 206}
-HTTP_AUTH_ERR = {401, 403}
+@contextmanager
+def suppress_warning(logger_name):
+ """Suppress logger messages.
-def sanitize(data):
- if data is None:
- return None
- elif isinstance(data, string_types):
- return data
- else:
- return dumps(data)
+ :param logger_name: Full name of the logger.
+ :type logger_name: str | unicode
+ """
+ logger = logging.getLogger(logger_name)
+ original_log_level = logger.getEffectiveLevel()
+ logger.setLevel(logging.CRITICAL)
+ try:
+ yield
+ finally:
+ # Restore the original level even if the wrapped block raises.
+ logger.setLevel(original_log_level)
+
+
+def get_col_name(doc):
+ """Return the collection name from input.
+
+ :param doc: Document ID or body with "_id" field.
+ :type doc: str | unicode | dict
+ :return: Collection name.
+ :rtype: str | unicode
+ :raise arango.exceptions.DocumentParseError: If document ID is missing.
+ """
+ try:
+ doc_id = doc['_id'] if isinstance(doc, dict) else doc
+ except KeyError:
+ raise DocumentParseError('field "_id" required')
+ return doc_id.split('/', 1)[0]
+
+
+def get_id(doc):
+ """Return the document ID from input.
+
+ :param doc: Document ID or body with "_id" field.
+ :type doc: str | unicode | dict
+ :return: Document ID.
+ :rtype: str | unicode
+ :raise arango.exceptions.DocumentParseError: If document ID is missing.
+ """
+ try:
+ return doc['_id'] if isinstance(doc, dict) else doc
+ except KeyError:
+ raise DocumentParseError('field "_id" required')
diff --git a/arango/version.py b/arango/version.py
index fee77de7..d6497a81 100644
--- a/arango/version.py
+++ b/arango/version.py
@@ -1 +1 @@
-VERSION = '3.12.1'
+__version__ = '4.0.0'
diff --git a/arango/wal.py b/arango/wal.py
index 94ea1e34..f559cb76 100644
--- a/arango/wal.py
+++ b/arango/wal.py
@@ -1,71 +1,101 @@
from __future__ import absolute_import, unicode_literals
-from arango.utils import HTTP_OK
+__all__ = ['WAL']
+
+from arango.api import APIWrapper
from arango.exceptions import (
WALFlushError,
WALPropertiesError,
WALConfigureError,
WALTransactionListError
)
+from arango.request import Request
-class WriteAheadLog(object):
- """ArangoDB write-ahead log object.
+class WAL(APIWrapper):
+ """WAL (Write-Ahead Log) API wrapper.
- :param connection: ArangoDB database connection
+ :param connection: HTTP connection.
:type connection: arango.connection.Connection
-
- .. note::
- This class is designed to be instantiated internally only.
+ :param executor: API executor.
+ :type executor: arango.executor.Executor
"""
- def __init__(self, connection):
- self._conn = connection
+ def __init__(self, connection, executor):
+ super(WAL, self).__init__(connection, executor)
+
+ # noinspection PyMethodMayBeStatic
+ def _format_properties(self, body):
+ """Format WAL properties.
- def __repr__(self):
- return ""
+ :param body: Response body.
+ :type body: dict
+ :return: Formatted body.
+ :rtype: dict
+ """
+ if 'allowOversizeEntries' in body:
+ body['oversized_ops'] = body.pop('allowOversizeEntries')
+ if 'logfileSize' in body:
+ body['log_size'] = body.pop('logfileSize')
+ if 'historicLogfiles' in body:
+ body['historic_logs'] = body.pop('historicLogfiles')
+ if 'reserveLogfiles' in body:
+ body['reserve_logs'] = body.pop('reserveLogfiles')
+ if 'syncInterval' in body:
+ body['sync_interval'] = body.pop('syncInterval')
+ if 'throttleWait' in body:
+ body['throttle_wait'] = body.pop('throttleWait')
+ if 'throttleWhenPending' in body:
+ body['throttle_limit'] = body.pop('throttleWhenPending')
+ return body
def properties(self):
- """Return the configuration of the write-ahead log.
+ """Return WAL properties.
- :returns: the configuration of the write-ahead log
+ :return: WAL properties.
:rtype: dict
- :raises arango.exceptions.WALPropertiesError: if the WAL properties
- cannot be retrieved from the server
+ :raise arango.exceptions.WALPropertiesError: If retrieval fails.
"""
- res = self._conn.get('/_admin/wal/properties')
- if res.status_code not in HTTP_OK:
- raise WALPropertiesError(res)
- return {
- 'oversized_ops': res.body.get('allowOversizeEntries'),
- 'log_size': res.body.get('logfileSize'),
- 'historic_logs': res.body.get('historicLogfiles'),
- 'reserve_logs': res.body.get('reserveLogfiles'),
- 'sync_interval': res.body.get('syncInterval'),
- 'throttle_wait': res.body.get('throttleWait'),
- 'throttle_limit': res.body.get('throttleWhenPending')
- }
-
- def configure(self, oversized_ops=None, log_size=None, historic_logs=None,
- reserve_logs=None, throttle_wait=None, throttle_limit=None):
- """Configure the parameters of the write-ahead log.
-
- :param oversized_ops: execute and store ops bigger than a log file
+ request = Request(
+ method='get',
+ endpoint='/_admin/wal/properties'
+ )
+
+ def response_handler(resp):
+ if not resp.is_success:
+ raise WALPropertiesError(resp, request)
+ return self._format_properties(resp.body)
+
+ return self._execute(request, response_handler)
+
+ def configure(self,
+ oversized_ops=None,
+ log_size=None,
+ historic_logs=None,
+ reserve_logs=None,
+ throttle_wait=None,
+ throttle_limit=None):
+ """Configure WAL properties.
+
+ :param oversized_ops: If set to True, operations bigger than a single
+ log file are allowed to be executed and stored.
:type oversized_ops: bool
- :param log_size: the size of each write-ahead log file
+ :param log_size: Size of each write-ahead log file in bytes.
:type log_size: int
- :param historic_logs: the number of historic log files to keep
+ :param historic_logs: Max number of historic log files to keep.
:type historic_logs: int
- :param reserve_logs: the number of reserve log files to allocate
+ :param reserve_logs: Max number of reserve log files to allocate.
:type reserve_logs: int
- :param throttle_wait: wait time before aborting when throttled (in ms)
+ :param throttle_wait: Wait time before aborting when write-throttled
+ in milliseconds.
:type throttle_wait: int
- :param throttle_limit: number of pending gc ops before write-throttling
+ :param throttle_limit: Number of pending garbage collector operations
+ that, when reached, activates write-throttling. Value of 0 means
+ no throttling is triggered.
:type throttle_limit: int
- :returns: the new configuration of the write-ahead log
+ :return: New WAL properties.
:rtype: dict
- :raises arango.exceptions.WALPropertiesError: if the WAL properties
- cannot be modified
+ :raise arango.exceptions.WALConfigureError: If operation fails.
"""
data = {}
if oversized_ops is not None:
@@ -80,67 +110,82 @@ def configure(self, oversized_ops=None, log_size=None, historic_logs=None,
data['throttleWait'] = throttle_wait
if throttle_limit is not None:
data['throttleWhenPending'] = throttle_limit
- res = self._conn.put('/_admin/wal/properties', data=data)
- if res.status_code not in HTTP_OK:
- raise WALConfigureError(res)
- return {
- 'oversized_ops': res.body.get('allowOversizeEntries'),
- 'log_size': res.body.get('logfileSize'),
- 'historic_logs': res.body.get('historicLogfiles'),
- 'reserve_logs': res.body.get('reserveLogfiles'),
- 'sync_interval': res.body.get('syncInterval'),
- 'throttle_wait': res.body.get('throttleWait'),
- 'throttle_limit': res.body.get('throttleWhenPending')
- }
+
+ request = Request(
+ method='put',
+ endpoint='/_admin/wal/properties',
+ data=data
+ )
+
+ def response_handler(resp):
+ if not resp.is_success:
+ raise WALConfigureError(resp, request)
+ return self._format_properties(resp.body)
+
+ return self._execute(request, response_handler)
def transactions(self):
- """Return details on currently running transactions.
+ """Return details on currently running WAL transactions.
+
+ Fields in the returned details are as follows:
- Fields in the returned dictionary:
+ .. code-block:: none
- - *last_collected*: the ID of the last collected log file (at the \
- start of each running transaction) or ``None`` if no transactions are \
- running
+ "last_collected" : ID of the last collected log file (at the
+ start of each running transaction) or None
+ if no transactions are running.
- - *last_sealed*: the ID of the last sealed log file (at the start \
- of each running transaction) or ``None`` if no transactions are \
- running
+ "last_sealed" : ID of the last sealed log file (at the start
+ of each running transaction) or None if no
+ transactions are running.
- - *count*: the number of current running transactions
+ "count" : Number of currently running transactions.
- :returns: the information about the currently running transactions
+ :return: Details on currently running WAL transactions.
:rtype: dict
- :raises arango.exceptions.WALTransactionListError: if the details on
- the transactions cannot be retrieved
+ :raise arango.exceptions.WALTransactionListError: If retrieval fails.
"""
- res = self._conn.get('/_admin/wal/transactions')
- if res.status_code not in HTTP_OK:
- raise WALTransactionListError(res)
- return {
- 'last_collected': res.body['minLastCollected'],
- 'last_sealed': res.body['minLastSealed'],
- 'count': res.body['runningTransactions']
- }
+ request = Request(
+ method='get',
+ endpoint='/_admin/wal/transactions'
+ )
+
+ def response_handler(resp):
+ if not resp.is_success:
+ raise WALTransactionListError(resp, request)
+ if 'minLastCollected' in resp.body:
+ resp.body['last_collected'] = resp.body.pop('minLastCollected')
+ if 'minLastSealed' in resp.body:
+ resp.body['last_sealed'] = resp.body.pop('minLastSealed')
+ if 'runningTransactions' in resp.body:
+ resp.body['count'] = resp.body.pop('runningTransactions')
+ return resp.body
+
+ return self._execute(request, response_handler)
def flush(self, sync=True, garbage_collect=True):
- """Flush the write-ahead log to collection journals and data files.
+ """Synchronize WAL to disk.
- :param sync: block until data is synced to disk
+ :param sync: Block until the synchronization is complete.
:type sync: bool
- :param garbage_collect: block until flushed data is garbage collected
+ :param garbage_collect: Block until flushed data is garbage collected.
:type garbage_collect: bool
- :returns: whether the write-ahead log was flushed successfully
+ :return: True if WAL was flushed successfully.
:rtype: bool
- :raises arango.exceptions.WALFlushError: it the WAL cannot
- be flushed
+ :raise arango.exceptions.WALFlushError: If flush operation fails.
"""
- res = self._conn.put(
- '/_admin/wal/flush',
- data={
+ request = Request(
+ method='put',
+ endpoint='/_admin/wal/flush',
+ params={
'waitForSync': sync,
'waitForCollector': garbage_collect
}
)
- if res.status_code not in HTTP_OK:
- raise WALFlushError(res)
- return not res.body.get('error')
+
+ def response_handler(resp):
+ if not resp.is_success:
+ raise WALFlushError(resp, request)
+ return True
+
+ return self._execute(request, response_handler)
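
As with Pregel, the WAL wrapper is presumably exposed on the database object (e.g. db.wal). A minimal round trip through the three operations above, run against the "_system" database:

    from arango import ArangoClient

    client = ArangoClient(protocol='http', host='localhost', port=8529)
    db = client.db('_system', username='root', password='passwd')

    # Read the current properties (keys use the snake_case names above).
    props = db.wal.properties()
    print(props['log_size'], props['throttle_limit'])

    # Reconfigure, then force a flush that blocks until the data is
    # synchronized to disk and garbage collected.
    db.wal.configure(historic_logs=15, throttle_limit=0)
    db.wal.flush(sync=True, garbage_collect=True)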
diff --git a/docs/Makefile b/docs/Makefile
index 56bf3b42..a565179a 100644
--- a/docs/Makefile
+++ b/docs/Makefile
@@ -1,225 +1,20 @@
-# Makefile for Sphinx documentation
+# Minimal makefile for Sphinx documentation
#
# You can set these variables from the command line.
SPHINXOPTS =
-SPHINXBUILD = sphinx-build
-PAPER =
+SPHINXBUILD = python -msphinx
+SPHINXPROJ = python-arango
+SOURCEDIR = .
BUILDDIR = _build
-# Internal variables.
-PAPEROPT_a4 = -D latex_paper_size=a4
-PAPEROPT_letter = -D latex_paper_size=letter
-ALLSPHINXOPTS = -d $(BUILDDIR)/doctrees $(PAPEROPT_$(PAPER)) $(SPHINXOPTS) .
-# the i18n builder cannot share the environment and doctrees with the others
-I18NSPHINXOPTS = $(PAPEROPT_$(PAPER)) $(SPHINXOPTS) .
-
-.PHONY: help
+# Put it first so that "make" without argument is like "make help".
help:
- @echo "Please use \`make ' where is one of"
- @echo " html to make standalone HTML files"
- @echo " dirhtml to make HTML files named index.html in directories"
- @echo " singlehtml to make a single large HTML file"
- @echo " pickle to make pickle files"
- @echo " json to make JSON files"
- @echo " htmlhelp to make HTML files and a HTML help project"
- @echo " qthelp to make HTML files and a qthelp project"
- @echo " applehelp to make an Apple Help Book"
- @echo " devhelp to make HTML files and a Devhelp project"
- @echo " epub to make an epub"
- @echo " epub3 to make an epub3"
- @echo " latex to make LaTeX files, you can set PAPER=a4 or PAPER=letter"
- @echo " latexpdf to make LaTeX files and run them through pdflatex"
- @echo " latexpdfja to make LaTeX files and run them through platex/dvipdfmx"
- @echo " text to make text files"
- @echo " man to make manual pages"
- @echo " texinfo to make Texinfo files"
- @echo " info to make Texinfo files and run them through makeinfo"
- @echo " gettext to make PO message catalogs"
- @echo " changes to make an overview of all changed/added/deprecated items"
- @echo " xml to make Docutils-native XML files"
- @echo " pseudoxml to make pseudoxml-XML files for display purposes"
- @echo " linkcheck to check all external links for integrity"
- @echo " doctest to run all doctests embedded in the documentation (if enabled)"
- @echo " coverage to run coverage check of the documentation (if enabled)"
- @echo " dummy to check syntax errors of document sources"
-
-.PHONY: clean
-clean:
- rm -rf $(BUILDDIR)/*
-
-.PHONY: html
-html:
- $(SPHINXBUILD) -b html $(ALLSPHINXOPTS) $(BUILDDIR)/html
- @echo
- @echo "Build finished. The HTML pages are in $(BUILDDIR)/html."
-
-.PHONY: dirhtml
-dirhtml:
- $(SPHINXBUILD) -b dirhtml $(ALLSPHINXOPTS) $(BUILDDIR)/dirhtml
- @echo
- @echo "Build finished. The HTML pages are in $(BUILDDIR)/dirhtml."
-
-.PHONY: singlehtml
-singlehtml:
- $(SPHINXBUILD) -b singlehtml $(ALLSPHINXOPTS) $(BUILDDIR)/singlehtml
- @echo
- @echo "Build finished. The HTML page is in $(BUILDDIR)/singlehtml."
-
-.PHONY: pickle
-pickle:
- $(SPHINXBUILD) -b pickle $(ALLSPHINXOPTS) $(BUILDDIR)/pickle
- @echo
- @echo "Build finished; now you can process the pickle files."
-
-.PHONY: json
-json:
- $(SPHINXBUILD) -b json $(ALLSPHINXOPTS) $(BUILDDIR)/json
- @echo
- @echo "Build finished; now you can process the JSON files."
-
-.PHONY: htmlhelp
-htmlhelp:
- $(SPHINXBUILD) -b htmlhelp $(ALLSPHINXOPTS) $(BUILDDIR)/htmlhelp
- @echo
- @echo "Build finished; now you can run HTML Help Workshop with the" \
- ".hhp project file in $(BUILDDIR)/htmlhelp."
-
-.PHONY: qthelp
-qthelp:
- $(SPHINXBUILD) -b qthelp $(ALLSPHINXOPTS) $(BUILDDIR)/qthelp
- @echo
- @echo "Build finished; now you can run "qcollectiongenerator" with the" \
- ".qhcp project file in $(BUILDDIR)/qthelp, like this:"
- @echo "# qcollectiongenerator $(BUILDDIR)/qthelp/python-arango.qhcp"
- @echo "To view the help file:"
- @echo "# assistant -collectionFile $(BUILDDIR)/qthelp/python-arango.qhc"
-
-.PHONY: applehelp
-applehelp:
- $(SPHINXBUILD) -b applehelp $(ALLSPHINXOPTS) $(BUILDDIR)/applehelp
- @echo
- @echo "Build finished. The help book is in $(BUILDDIR)/applehelp."
- @echo "N.B. You won't be able to view it unless you put it in" \
- "~/Library/Documentation/Help or install it in your application" \
- "bundle."
-
-.PHONY: devhelp
-devhelp:
- $(SPHINXBUILD) -b devhelp $(ALLSPHINXOPTS) $(BUILDDIR)/devhelp
- @echo
- @echo "Build finished."
- @echo "To view the help file:"
- @echo "# mkdir -p $$HOME/.local/share/devhelp/python-arango"
- @echo "# ln -s $(BUILDDIR)/devhelp $$HOME/.local/share/devhelp/python-arango"
- @echo "# devhelp"
-
-.PHONY: epub
-epub:
- $(SPHINXBUILD) -b epub $(ALLSPHINXOPTS) $(BUILDDIR)/epub
- @echo
- @echo "Build finished. The epub file is in $(BUILDDIR)/epub."
-
-.PHONY: epub3
-epub3:
- $(SPHINXBUILD) -b epub3 $(ALLSPHINXOPTS) $(BUILDDIR)/epub3
- @echo
- @echo "Build finished. The epub3 file is in $(BUILDDIR)/epub3."
-
-.PHONY: latex
-latex:
- $(SPHINXBUILD) -b latex $(ALLSPHINXOPTS) $(BUILDDIR)/latex
- @echo
- @echo "Build finished; the LaTeX files are in $(BUILDDIR)/latex."
- @echo "Run \`make' in that directory to run these through (pdf)latex" \
- "(use \`make latexpdf' here to do that automatically)."
-
-.PHONY: latexpdf
-latexpdf:
- $(SPHINXBUILD) -b latex $(ALLSPHINXOPTS) $(BUILDDIR)/latex
- @echo "Running LaTeX files through pdflatex..."
- $(MAKE) -C $(BUILDDIR)/latex all-pdf
- @echo "pdflatex finished; the PDF files are in $(BUILDDIR)/latex."
-
-.PHONY: latexpdfja
-latexpdfja:
- $(SPHINXBUILD) -b latex $(ALLSPHINXOPTS) $(BUILDDIR)/latex
- @echo "Running LaTeX files through platex and dvipdfmx..."
- $(MAKE) -C $(BUILDDIR)/latex all-pdf-ja
- @echo "pdflatex finished; the PDF files are in $(BUILDDIR)/latex."
-
-.PHONY: text
-text:
- $(SPHINXBUILD) -b text $(ALLSPHINXOPTS) $(BUILDDIR)/text
- @echo
- @echo "Build finished. The text files are in $(BUILDDIR)/text."
-
-.PHONY: man
-man:
- $(SPHINXBUILD) -b man $(ALLSPHINXOPTS) $(BUILDDIR)/man
- @echo
- @echo "Build finished. The manual pages are in $(BUILDDIR)/man."
-
-.PHONY: texinfo
-texinfo:
- $(SPHINXBUILD) -b texinfo $(ALLSPHINXOPTS) $(BUILDDIR)/texinfo
- @echo
- @echo "Build finished. The Texinfo files are in $(BUILDDIR)/texinfo."
- @echo "Run \`make' in that directory to run these through makeinfo" \
- "(use \`make info' here to do that automatically)."
-
-.PHONY: info
-info:
- $(SPHINXBUILD) -b texinfo $(ALLSPHINXOPTS) $(BUILDDIR)/texinfo
- @echo "Running Texinfo files through makeinfo..."
- make -C $(BUILDDIR)/texinfo info
- @echo "makeinfo finished; the Info files are in $(BUILDDIR)/texinfo."
-
-.PHONY: gettext
-gettext:
- $(SPHINXBUILD) -b gettext $(I18NSPHINXOPTS) $(BUILDDIR)/locale
- @echo
- @echo "Build finished. The message catalogs are in $(BUILDDIR)/locale."
-
-.PHONY: changes
-changes:
- $(SPHINXBUILD) -b changes $(ALLSPHINXOPTS) $(BUILDDIR)/changes
- @echo
- @echo "The overview file is in $(BUILDDIR)/changes."
-
-.PHONY: linkcheck
-linkcheck:
- $(SPHINXBUILD) -b linkcheck $(ALLSPHINXOPTS) $(BUILDDIR)/linkcheck
- @echo
- @echo "Link check complete; look for any errors in the above output " \
- "or in $(BUILDDIR)/linkcheck/output.txt."
-
-.PHONY: doctest
-doctest:
- $(SPHINXBUILD) -b doctest $(ALLSPHINXOPTS) $(BUILDDIR)/doctest
- @echo "Testing of doctests in the sources finished, look at the " \
- "results in $(BUILDDIR)/doctest/output.txt."
-
-.PHONY: coverage
-coverage:
- $(SPHINXBUILD) -b coverage $(ALLSPHINXOPTS) $(BUILDDIR)/coverage
- @echo "Testing of coverage in the sources finished, look at the " \
- "results in $(BUILDDIR)/coverage/python.txt."
-
-.PHONY: xml
-xml:
- $(SPHINXBUILD) -b xml $(ALLSPHINXOPTS) $(BUILDDIR)/xml
- @echo
- @echo "Build finished. The XML files are in $(BUILDDIR)/xml."
+ @$(SPHINXBUILD) -M help "$(SOURCEDIR)" "$(BUILDDIR)" $(SPHINXOPTS) $(O)
-.PHONY: pseudoxml
-pseudoxml:
- $(SPHINXBUILD) -b pseudoxml $(ALLSPHINXOPTS) $(BUILDDIR)/pseudoxml
- @echo
- @echo "Build finished. The pseudo-XML files are in $(BUILDDIR)/pseudoxml."
+.PHONY: help Makefile
-.PHONY: dummy
-dummy:
- $(SPHINXBUILD) -b dummy $(ALLSPHINXOPTS) $(BUILDDIR)/dummy
- @echo
- @echo "Build finished. Dummy builder generates no files."
+# Catch-all target: route all unknown targets to Sphinx using the new
+# "make mode" option. $(O) is meant as a shortcut for $(SPHINXOPTS).
+%: Makefile
+ @$(SPHINXBUILD) -M $@ "$(SOURCEDIR)" "$(BUILDDIR)" $(SPHINXOPTS) $(O)
\ No newline at end of file
diff --git a/docs/admin.rst b/docs/admin.rst
index 17b5bca2..4beb2479 100644
--- a/docs/admin.rst
+++ b/docs/admin.rst
@@ -1,130 +1,63 @@
-.. _admin-page:
-
Server Administration
---------------------
-Python-arango provides operations for server administration and monitoring such
-as retrieving statistics and reading logs.
+Python-arango provides operations for server administration and monitoring.
+Most of these operations can only be performed by admin users via the
+``_system`` database.
-Example:
+**Example:**
-.. code-block:: python
+.. testcode::
from arango import ArangoClient
- client = ArangoClient(username='root', password='password')
-
- # Check connection to the server
- client.verify()
-
- # List the databases
- client.databases()
-
- # Get the server version
- client.version()
-
- # Get the required DB version
- client.required_db_version()
-
- # Get the server time
- client.time()
-
- # Get the server role in a cluster
- client.role()
-
- # Get the server statistics
- client.statistics()
-
- # Read the server log
- client.read_log(level="debug")
-
- # Get the log levels
- client.log_level()
-
- # Set the log levels
- client.set_log_level(
- agency='DEBUG',
- collector='INFO',
- threads='WARNING'
- )
-
- # List the endpoints the server is listening on
- client.endpoints()
-
- # Echo the last request
- client.echo()
-
- # Suspend the server
- client.sleep(seconds=2)
-
- # Shutdown the server
- client.shutdown()
-
- # Reload the routing collection
- client.reload_routing()
-
-
-Note that the methods of :class:`arango.client.ArangoClient` above can only
-be called by root user with access to ``_system`` database. Non-root users can
-call the equivalent methods of :class:`arango.database.Database` through a
-database they have access to instead. For example:
+ # Initialize the ArangoDB client.
+ client = ArangoClient()
-.. code-block:: python
+ # Connect to "_system" database as root user.
+ sys_db = client.db('_system', username='root', password='passwd')
- from arango import ArangoClient
+ # Check the server connection by sending a test GET request.
+ sys_db.ping()
- client = ArangoClient()
- db = client.database(
- name='database-the-user-has-access-to',
- username='username',
- password='password'
- )
+ # Retrieve the server version.
+ sys_db.version()
- # Check connection to the server
- db.verify()
+ # Retrieve the server details.
+ sys_db.details()
- # Get the server version
- db.version()
+ # Retrieve the target DB version.
+ sys_db.required_db_version()
- # Get the required DB version
- db.required_db_version()
+ # Retrieve the database engine.
+ sys_db.engine()
- # Get the server time
- db.time()
+ # Retrieve the server time.
+ sys_db.time()
- # Get the server role in a cluster
- db.role()
+ # Retrieve the server role in a cluster.
+ sys_db.role()
- # Get the server statistics
- db.statistics()
+ # Retrieve the server statistics.
+ sys_db.statistics()
- # Read the server log
- db.read_log(level="debug")
+ # Read the server log.
+ sys_db.read_log(level="debug")
- # Get the log levels
- db.log_level()
+ # Retrieve the log levels.
+ sys_db.log_levels()
- # Set the log levels
- db.set_log_level(
+ # Set the log levels.
+ sys_db.set_log_levels(
agency='DEBUG',
collector='INFO',
threads='WARNING'
)
- # Echo the last request
- db.echo()
-
- # Suspend the server
- db.sleep(seconds=2)
-
- # Shutdown the server
- db.shutdown()
-
- # Reload the routing collection
- db.reload_routing()
+ # Echo the last request.
+ sys_db.echo()
+ # Reload the routing collection.
+ sys_db.reload_routing()
-Methods :func:`arango.client.ArangoClient.databases` and
-:func:`arango.client.ArangoClient.endpoints` are not available to
-non-root users. Refer to classes :class:`arango.client.ArangoClient` and
-:ref::class:`arango.database.Database` for more details on admin methods.
+See :ref:`StandardDatabase` for API specification.
\ No newline at end of file
diff --git a/docs/aql.rst b/docs/aql.rst
index 0fb0a402..2ff84f91 100644
--- a/docs/aql.rst
+++ b/docs/aql.rst
@@ -1,109 +1,151 @@
-.. _aql-page:
-
AQL
----
-**ArangoDB Query Language (AQL)** is used to retrieve and modify data in
-ArangoDB. AQL is similar to SQL for relational databases, but without the
-support for data definition operations such as creating/deleting
-:ref:`databases `, :ref:`collections ` and
-:ref:`indexes `. For more general information on AQL visit
-`here `__.
+**ArangoDB Query Language (AQL)** is used to read and write data. It is similar
+to SQL for relational databases, but without the support for data definition
+operations such as creating or deleting :doc:`databases <database>`,
+:doc:`collections <collection>` or :doc:`indexes <indexes>`. For more
+information, refer to `ArangoDB manual`_.
+
+.. _ArangoDB manual: https://docs.arangodb.com
AQL Queries
===========
-**AQL queries** can be invoked using the :ref:`AQL` class, which outputs
-instances of the :ref:`Cursor` class. For more information on the syntax of AQL
-visit `here `__.
+AQL queries are invoked from the AQL API wrapper. Executing queries returns
+:doc:`result cursors <cursor>`.
-Below is an example of executing a query:
+**Example:**
-.. code-block:: python
+.. testcode::
- from arango import ArangoClient
+ from arango import ArangoClient, AQLQueryKillError
+ # Initialize the ArangoDB client.
client = ArangoClient()
- db = client.db('my_database')
- # Set up some test data to query against
+ # Connect to "test" database as root user.
+ db = client.db('test', username='root', password='passwd')
+
+ # Insert some test documents into "students" collection.
db.collection('students').insert_many([
{'_key': 'Abby', 'age': 22},
{'_key': 'John', 'age': 18},
{'_key': 'Mary', 'age': 21}
])
- # Retrieve the execution plan without running the query
- db.aql.explain('FOR s IN students RETURN s')
+ # Get the AQL API wrapper.
+ aql = db.aql
+
+ # Retrieve the execution plan without running the query.
+ aql.explain('FOR doc IN students RETURN doc')
- # Validate the query without executing it
- db.aql.validate('FOR s IN students RETURN s')
+ # Validate the query without executing it.
+ aql.validate('FOR doc IN students RETURN doc')
# Execute the query
cursor = db.aql.execute(
- 'FOR s IN students FILTER s.age < @value RETURN s',
+ 'FOR doc IN students FILTER doc.age < @value RETURN doc',
bind_vars={'value': 19}
)
# Iterate through the result cursor
- print([student['_key'] for student in cursor])
+ student_keys = [doc['_key'] for doc in cursor]
+
+ # List currently running queries.
+ aql.queries()
+
+ # List any slow queries.
+ aql.slow_queries()
+
+ # Clear slow AQL queries if any.
+ aql.clear_slow_queries()
+
+ # Retrieve AQL query tracking properties.
+ aql.tracking()
+
+ # Configure AQL query tracking properties.
+ aql.set_tracking(
+ max_slow_queries=10,
+ track_bind_vars=True,
+ track_slow_queries=True
+ )
+
+ # Kill a running query (this should fail due to invalid ID).
+ try:
+ aql.kill('some_query_id')
+ except AQLQueryKillError as err:
+ assert err.http_code == 400
+ assert err.error_code == 1591
+ assert 'cannot kill query' in err.message
+
+See :ref:`AQL` for API specification.
AQL User Functions
==================
-**AQL user functions** are custom functions which can be defined by users to
-extend the functionality of AQL. While python-arango provides ways to add,
-delete and retrieve user functions in Python, the functions themselves must be
-defined in Javascript. For more general information on AQL user functions visit
-this `page `__.
+**AQL User Functions** are custom functions you define in JavaScript to extend
+AQL functionality. They are somewhat similar to SQL stored procedures.
-Below is an example of creating and deleting an AQL function:
+**Example:**
-.. code-block:: python
+.. testcode::
from arango import ArangoClient
+ # Initialize the ArangoDB client.
client = ArangoClient()
- db = client.db('my_database')
- # Create a new AQL user function
- db.aql.create_function(
+ # Connect to "test" database as root user.
+ db = client.db('test', username='root', password='passwd')
+
+ # Get the AQL API wrapper.
+ aql = db.aql
+
+ # Create a new AQL user function.
+ aql.create_function(
+ # Grouping by name prefix is supported.
name='functions::temperature::converter',
code='function (celsius) { return celsius * 1.8 + 32; }'
)
- # List all available AQL user functions
- db.aql.functions()
+ # List AQL user functions.
+ aql.functions()
- # Delete an existing AQL user function
- db.aql.delete_function('functions::temperature::converter')
+ # Delete an existing AQL user function.
+ aql.delete_function('functions::temperature::converter')
-Refer to :ref:`AQL` class for more details.
+See :ref:`AQL` for API specification.
AQL Query Cache
===============
-**AQL query cache** is used to minimize redundant calculation of the same
-query result. It is useful when read queries are called frequently and write
-queries are not. For more general information on AQL query caches visit this
-`page `__.
+**AQL Query Cache** is used to minimize redundant calculation of the same query
+results. It is useful when read queries are issued frequently and write queries
+are not.
-Here is an example showing how the AQL query cache can be used:
+**Example:**
-.. code-block:: python
+.. testcode::
from arango import ArangoClient
+ # Initialize the ArangoDB client.
client = ArangoClient()
- db = client.db('my_database')
- # Configure the AQL query cache properties
- db.aql.cache.configure(mode='demand', limit=10000)
+ # Connect to "test" database as root user.
+ db = client.db('test', username='root', password='passwd')
+
+ # Get the AQL API wrapper.
+ aql = db.aql
+
+ # Retrieve AQL query cache properties.
+ aql.cache.properties()
- # Retrieve the AQL query cache properties
- db.aql.cache.properties()
+ # Configure AQL query cache properties.
+ aql.cache.configure(mode='demand', limit=10000)
- # Clear the AQL query cache
- db.aql.cache.clear()
+ # Clear results in AQL query cache.
+ aql.cache.clear()
-Refer to :ref:`AQLQueryCache` class for more details.
+See :ref:`AQLQueryCache` for API specification.
diff --git a/docs/async.rst b/docs/async.rst
index 506aed15..f27e5547 100644
--- a/docs/async.rst
+++ b/docs/async.rst
@@ -1,72 +1,102 @@
-.. _async-page:
-
Async Execution
---------------
-Python-arango provides support for **asynchronous executions**, where incoming
-requests are placed in a server-side, in-memory task queue and executed in a
-fire-and-forget style. The results of the requests can be retrieved later via
-:ref:`AsyncJob` objects.
-
-.. note::
- The user should be mindful of the server-side memory while using
- asynchronous executions with a large number of requests.
+Python-arango supports **async execution**, where it sends requests to the
+ArangoDB server in a fire-and-forget style (HTTP 202 is returned). The server
+places incoming requests in its queue and processes them in the background.
+The results can be retrieved from the server later via :ref:`AsyncJob` objects.
-.. warning::
- Asynchronous execution is currently an experimental feature and is not
- thread-safe.
+**Example:**
-Here is an example showing how asynchronous executions can be used:
+.. testcode::
-.. code-block:: python
+ import time
- from arango import ArangoClient, ArangoError
+ from arango import (
+ ArangoClient,
+ AQLQueryExecuteError,
+ AsyncJobCancelError,
+ AsyncJobClearError
+ )
+ # Initialize the ArangoDB client.
client = ArangoClient()
- db = client.db('my_database')
-
- # Initialize an AsyncExecution object to make asynchronous requests
- asynchronous = db.asynchronous(return_result=True)
-
- # AsyncExecution has a similar interface as that of Database, but
- # AsyncJob objects are returned instead of results on method calls
- job1 = asynchronous.collection('students').insert({'_key': 'Abby'})
- job2 = asynchronous.collection('students').insert({'_key': 'John'})
- job3 = asynchronous.collection('students').insert({'_key': 'John'})
- job4 = asynchronous.aql.execute('FOR d IN students RETURN d')
-
- # Check the statuses of the asynchronous jobs
- for job in [job1, job2, job3, job4]:
- print(job.status())
-
- # Retrieve the result of a job
- result = job1.result()
-
- # If a job fails, the exception object is returned (not raised)
- result = job3.result()
- assert isinstance(result, ArangoError)
- # Cancel a pending job
- job3.cancel()
+ # Connect to "test" database as root user.
+ db = client.db('test', username='root', password='passwd')
- # Delete a result of a job from the server to free up resources
- job4.clear()
+ # Begin async execution. This returns an instance of AsyncDatabase, a
+ # database-level API wrapper tailored specifically for async execution.
+ async_db = db.begin_async_execution(return_result=True)
- # List the first 100 jobs completed (root user only)
- client.async_jobs(status='done', count=100)
+ # Child wrappers are also tailored for async execution.
+ async_aql = async_db.aql
+ async_col = async_db.collection('students')
- # List the first 100 jobs still pending in the queue (root user only)
- client.async_jobs(status='pending', count=100)
+ # API execution context is always set to "async".
+ assert async_db.context == 'async'
+ assert async_aql.context == 'async'
+ assert async_col.context == 'async'
- # Non-root users can only list jobs in a database they have access to
- client.db('database-the-user-has-access-to').async_jobs()
+ # On API execution, AsyncJob objects are returned instead of results.
+ job1 = async_col.insert({'_key': 'Neal'})
+ job2 = async_col.insert({'_key': 'Lily'})
+ job3 = async_aql.execute('RETURN 100000')
+ job4 = async_aql.execute('INVALID QUERY') # Fails due to syntax error.
- # Clear all jobs from the server (root user only)
- client.clear_async_jobs()
-
- # Non-root users can only clear jobs in a databases they have access to
- client.db('database-the-user-has-access-to').clear_async_jobs()
+ # Retrieve the status of each async job.
+ for job in [job1, job2, job3, job4]:
+ # Job status can be "pending", "done" or "cancelled".
+ assert job.status() in {'pending', 'done', 'cancelled'}
+
+ # Let's wait until the jobs are finished.
+ while job.status() != 'done':
+ time.sleep(0.1)
+
+ # Retrieve the results of successful jobs.
+ metadata = job1.result()
+ assert metadata['_id'] == 'students/Neal'
+
+ metadata = job2.result()
+ assert metadata['_id'] == 'students/Lily'
+
+ cursor = job3.result()
+ assert cursor.next() == 100000
+
+ # If a job fails, the exception is propagated up during result retrieval.
+ try:
+ result = job4.result()
+ except AQLQueryExecuteError as err:
+ assert err.http_code == 400
+ assert err.error_code == 1501
+ assert 'syntax error' in err.message
+
+ # Cancel a job. Only pending jobs still in queue may be cancelled.
+ # Since job3 is done, there is nothing to cancel and an exception is raised.
+ try:
+ job3.cancel()
+ except AsyncJobCancelError as err:
+ assert err.message.endswith('job {} not found'.format(job3.id))
+
+ # Clear the result of a job from ArangoDB server to free up resources.
+ # Result of job4 was removed from the server automatically upon retrieval,
+ # so attempt to clear it raises an exception.
+ try:
+ job4.clear()
+ except AsyncJobClearError as err:
+ assert err.message.endswith('job {} not found'.format(job4.id))
+
+ # List the IDs of the first 100 async jobs completed.
+ db.async_jobs(status='done', count=100)
+
+ # List the IDs of the first 100 async jobs still pending.
+ db.async_jobs(status='pending', count=100)
+
+ # Clear all async jobs still sitting on the server.
+ db.clear_async_jobs()
+.. note::
+ Be mindful of server-side memory capacity when issuing a large number of
+ async requests in a short time interval.
-Refer to :ref:`ArangoClient`, :ref:`AsyncExecution` and :ref:`AsyncJob`
-classes for more details.
+See :ref:`AsyncDatabase` and :ref:`AsyncJob` for API specification.
diff --git a/docs/batch.rst b/docs/batch.rst
index ec542693..5db8229a 100644
--- a/docs/batch.rst
+++ b/docs/batch.rst
@@ -1,55 +1,91 @@
-.. _batch-page:
-
Batch Execution
---------------
-Python-arango provides support for **batch executions**, where incoming
-requests are queued in client-side memory and executed in a single HTTP call.
-After the batch is committed, the results of the queued requests can be
-retrieved via :ref:`BatchJob` objects.
-
-.. note::
- The user should be mindful of the client-side memory while using batch
- executions with a large number of requests.
-
-.. warning::
- Batch execution is currently an experimental feature and is not
- thread-safe.
+Python-arango supports **batch execution**. Requests to the ArangoDB server are
+placed in a client-side in-memory queue and committed together in a single HTTP
+call. After the commit, results can be retrieved from :ref:`BatchJob` objects.
-Here is an example showing how batch executions can be used:
+**Example:**
.. code-block:: python
- from arango import ArangoClient, ArangoError
+ from arango import ArangoClient, AQLQueryExecuteError
+ # Initialize the ArangoDB client.
client = ArangoClient()
- db = client.db('my_database')
- # Initialize the BatchExecution object via a context manager
- with db.batch(return_result=True) as batch:
+ # Connect to "test" database as root user.
+ db = client.db('test', username='root', password='passwd')
+
+ # Get the API wrapper for "students" collection.
+ students = db.collection('students')
+
+ # Begin batch execution via context manager. This returns an instance of
+ # BatchDatabase, a database-level API wrapper tailored specifically for
+ # batch execution. The batch is automatically committed when exiting the
+ # context. The BatchDatabase wrapper cannot be reused after commit.
+ with db.begin_batch_execution(return_result=True) as batch_db:
+
+ # Child wrappers are also tailored for batch execution.
+ batch_aql = batch_db.aql
+ batch_col = batch_db.collection('students')
- # BatchExecution has a similar interface as that of Database, but
- # BatchJob objects are returned instead of results on method calls
- job1 = batch.aql.execute('FOR d IN students RETURN d')
- job2 = batch.collection('students').insert({'_key': 'Abby'})
- job3 = batch.collection('students').insert({'_key': 'John'})
- job4 = batch.collection('students').insert({'_key': 'John'})
+ # API execution context is always set to "batch".
+ assert batch_db.context == 'batch'
+ assert batch_aql.context == 'batch'
+ assert batch_col.context == 'batch'
- # Upon exiting context, the queued requests are committed
- for job in [job1, job2, job3, job4]:
- print(job.status())
+ # BatchJob objects are returned instead of results.
+ job1 = batch_col.insert({'_key': 'Kris'})
+ job2 = batch_col.insert({'_key': 'Rita'})
+ job3 = batch_aql.execute('RETURN 100000')
+ job4 = batch_aql.execute('INVALID QUERY') # Fails due to syntax error.
- # Retrieve the result of a job
- job1.result()
+ # Upon exiting context, batch is automatically committed.
+ assert 'Kris' in students
+ assert 'Rita' in students
- # If a job fails, the exception object is returned (not raised)
- assert isinstance(job4.result(), ArangoError)
+ # Retrieve the status of each batch job.
+ for job in batch_db.queued_jobs():
+ # Status is set to either "pending" (transaction is not committed yet
+ # and result is not available) or "done" (transaction is committed and
+ # result is available).
+ assert job.status() in {'pending', 'done'}
- # BatchExecution can also be initialized without a context manager
- batch = db.batch(return_result=True)
- students = batch.collection('students')
- job5 = batch.collection('students').insert({'_key': 'Jake'})
- job6 = batch.collection('students').insert({'_key': 'Jill'})
- batch.commit() # In which case the commit must be called explicitly
+ # Retrieve the results of successful jobs.
+ metadata = job1.result()
+ assert metadata['_id'] == 'students/Kris'
+
+ metadata = job2.result()
+ assert metadata['_id'] == 'students/Rita'
+
+ cursor = job3.result()
+ assert cursor.next() == 100000
+
+ # If a job fails, the exception is propagated up during result retrieval.
+ try:
+ result = job4.result()
+ except AQLQueryExecuteError as err:
+ assert err.http_code == 400
+ assert err.error_code == 1501
+ assert 'syntax error' in err.message
+
+ # Batch execution can be initiated without using a context manager.
+ # If return_result parameter is set to False, no jobs are returned.
+ batch_db = db.begin_batch_execution(return_result=False)
+ batch_db.collection('students').insert({'_key': 'Jake'})
+ batch_db.collection('students').insert({'_key': 'Jill'})
+
+ # The commit must be called explicitly.
+ batch_db.commit()
+ assert 'Jake' in students
+ assert 'Jill' in students
+
+.. note::
+ * Be mindful of client-side memory capacity when issuing a large number of
+ requests in a single batch execution.
+ * :ref:`BatchDatabase` and :ref:`BatchJob` instances are stateful objects,
+ and should not be shared across multiple threads.
+ * :ref:`BatchDatabase` instance cannot be reused after commit.
-Refer to :ref:`BatchExecution` and :ref:`BatchJob` classes for more details.
+See :ref:`BatchDatabase` and :ref:`BatchJob` for API specification.
diff --git a/docs/classes.rst b/docs/classes.rst
deleted file mode 100644
index 7eaf7946..00000000
--- a/docs/classes.rst
+++ /dev/null
@@ -1,149 +0,0 @@
-Class Specifications
---------------------
-
-This page contains the specifications for all classes and methods available in
-python-arango.
-
-.. _ArangoClient:
-
-ArangoClient
-============
-
-.. autoclass:: arango.client.ArangoClient
- :members:
-
-.. _AsyncExecution:
-
-AsyncExecution
-==============
-
-.. autoclass:: arango.async.AsyncExecution
- :members:
- :exclude-members: handle_request
-
-.. _AsyncJob:
-
-AsyncJob
-========
-
-.. autoclass:: arango.async.AsyncJob
- :members:
-
-.. _AQL:
-
-AQL
-====
-
-.. autoclass:: arango.aql.AQL
- :members:
-
-.. _AQLQueryCache:
-
-AQLQueryCache
-=============
-
-.. autoclass:: arango.aql.AQLQueryCache
- :members:
-
-.. _BaseHTTPClient:
-
-BaseHTTPClient
-==============
-
-.. autoclass:: arango.http_clients.base.BaseHTTPClient
- :members:
-
-
-.. _BatchExecution:
-
-BatchExecution
-==============
-
-.. autoclass:: arango.batch.BatchExecution
- :members:
- :exclude-members: handle_request
-
-.. _BatchJob:
-
-BatchJob
-========
-
-.. autoclass:: arango.batch.BatchJob
- :members:
- :exclude-members: update
-
-.. _Cursor:
-
-Cursor
-======
-
-.. autoclass:: arango.cursor.Cursor
- :members:
-
-.. _Collection:
-
-Collection
-==========
-
-.. autoclass:: arango.collections.Collection
- :inherited-members:
- :members:
-
-.. _Database:
-
-Database
-========
-
-.. autoclass:: arango.database.Database
- :members:
-
-.. _EdgeCollection:
-
-EdgeCollection
-==============
-
-.. autoclass:: arango.collections.EdgeCollection
- :inherited-members:
- :members:
-
-.. _Graph:
-
-Graph
-=====
-
-.. autoclass:: arango.graph.Graph
- :members:
-
-.. _Response:
-
-Response
-========
-
-.. autoclass:: arango.response.Response
- :members:
-
-.. _Transaction:
-
-Transaction
-===========
-
-.. autoclass:: arango.transaction.Transaction
- :members:
- :exclude-members: handle_request
-
-.. _VertexCollection:
-
-VertexCollection
-================
-
-.. autoclass:: arango.collections.VertexCollection
- :inherited-members:
- :members:
-
-.. _WriteAheadLog:
-
-WriteAheadLog
-=============
-
-.. autoclass:: arango.wal.WriteAheadLog
- :members:
diff --git a/docs/client.rst b/docs/client.rst
deleted file mode 100644
index 58a6b625..00000000
--- a/docs/client.rst
+++ /dev/null
@@ -1,45 +0,0 @@
-.. _client-page:
-
-Getting Started
----------------
-
-Here is an example showing how a **python-arango** client can be initialized
-and used:
-
-.. code-block:: python
-
- from arango import ArangoClient
-
- # Initialize the client for ArangoDB
- client = ArangoClient(
- protocol='http',
- host='localhost',
- port=8529,
- username='root',
- password='',
- enable_logging=True
- )
-
- # Create a new database named "my_database"
- db = client.create_database('my_database')
-
- # Create a new user with access to "my_database"
- client.create_user('admin', 'password')
- client.grant_user_access('admin', 'my_database')
-
- # Create a new collection named "students"
- students = db.create_collection('students')
-
- # Add a hash index to the collection
- students.add_hash_index(fields=['name'], unique=True)
-
- # Insert new documents into the collection
- students.insert({'name': 'jane', 'age': 19})
- students.insert({'name': 'josh', 'age': 18})
- students.insert({'name': 'jake', 'age': 21})
-
- # Execute an AQL query
- result = db.aql.execute('FOR s IN students RETURN s')
- print([student['name'] for student in result])
-
-Read the rest of the documentation to discover much more!
diff --git a/docs/collection.rst b/docs/collection.rst
index 72ea8248..178ac297 100644
--- a/docs/collection.rst
+++ b/docs/collection.rst
@@ -1,44 +1,54 @@
-.. _collection-page:
-
Collections
-----------
-A **collection** contains :ref:`documents `. It is uniquely
-identified by its name which must consist only of alphanumeric, hyphen and
-underscore characters. There are *three* types of collections: standard
-**document collections** which contain normal documents, **vertex collections**
-which also contain normal documents, and **edge collections** which contain
-:ref:`edges `.
+A **collection** contains :doc:`documents <document>`. It is uniquely identified
+by its name, which must consist only of hyphen, underscore and alphanumeric
+characters. There are *three* types of collections in python-arango:
+
+* Standard Collection: contains regular :doc:`documents <document>`.
+* :ref:`Vertex Collection `: contains vertex documents used
+ in graphs.
+* :ref:`Edge Collection `: contains edge documents used in
+ graphs.
-Here is an example showing how collections can be managed:
+**Example:**
-.. code-block:: python
+.. testcode::
from arango import ArangoClient
+ # Initialize the ArangoDB client.
client = ArangoClient()
- db = client.db('my_database')
- # List all collections in "my_database"
- db.collections()
+ # Connect to "test" database as root user.
+ db = client.db('test', username='root', password='passwd')
- # Create a new collection named "students"
- students = db.create_collection('students')
+ # List all collections in the database.
+ db.collections()
- # Retrieve an existing collection
- students = db.collection('students')
+ # Create a new collection named "students" if it does not exist.
+ # This returns an API wrapper for "students" collection.
+ if db.has_collection('students'):
+ students = db.collection('students')
+ else:
+ students = db.create_collection('students')
- # Retrieve collection information
+ # Retrieve collection properties.
+ students.name
+ students.db_name
students.properties()
students.revision()
students.statistics()
students.checksum()
students.count()
- # Perform actions on a collection
+ # Perform various operations.
students.load()
students.unload()
students.truncate()
students.configure(journal_size=3000000)
-Refer to :ref:`Collection` class for more details.
+ # Delete the collection.
+ db.delete_collection('students')
+
+See :ref:`StandardDatabase` and :ref:`StandardCollection` for API specification.
diff --git a/docs/conf.py b/docs/conf.py
index 0d617b09..40a248dd 100644
--- a/docs/conf.py
+++ b/docs/conf.py
@@ -1,7 +1,8 @@
+#!/usr/bin/env python3
# -*- coding: utf-8 -*-
#
# python-arango documentation build configuration file, created by
-# sphinx-quickstart on Sun Jul 24 17:17:48 2016.
+# sphinx-quickstart on Thu Apr 19 03:40:33 2018.
#
# This file is execfile()d with the current directory set to its
# containing dir.
@@ -19,7 +20,7 @@
import os
import sys
-from arango.version import VERSION
+from arango.version import __version__
sys.path.insert(0, os.path.abspath('../arango'))
@@ -35,13 +36,14 @@
# ones.
extensions = [
'sphinx.ext.autodoc',
+ 'sphinx.ext.doctest',
+ 'sphinx.ext.coverage',
'sphinx.ext.viewcode',
- 'sphinx.ext.todo',
- 'sphinx.ext.coverage'
+ 'sphinx.ext.githubpages',
]
# Add any paths that contain templates here, relative to this directory.
-templates_path = ['templates']
+templates_path = ['_templates']
# The suffix(es) of source filenames.
# You can specify multiple suffix as a list of string:
@@ -49,26 +51,22 @@
# source_suffix = ['.rst', '.md']
source_suffix = '.rst'
-# The encoding of source files.
-#
-# source_encoding = 'utf-8-sig'
-
# The master toctree document.
master_doc = 'index'
# General information about the project.
-project = u'python-arango'
-copyright = u'2016, Joohwan Oh'
-author = u'Joohwan Oh'
+project = 'python-arango'
+copyright = '2016, Joohwan Oh'
+author = 'Joohwan Oh'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
-version = VERSION
+version = __version__
# The full version, including alpha/beta/rc tags.
-release = VERSION
+release = __version__
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
@@ -77,50 +75,16 @@
# Usually you set "language" from the command line for these cases.
language = None
-# There are two options for replacing |today|: either, you set today to some
-# non-false value, then it is used:
-#
-# today = ''
-#
-# Else, today_fmt is used as the format for a strftime call.
-#
-# today_fmt = '%B %d, %Y'
-
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# This patterns also effect to html_static_path and html_extra_path
exclude_patterns = ['_build', 'Thumbs.db', '.DS_Store']
-# The reST default role (used for this markup: `text`) to use for all
-# documents.
-#
-# default_role = None
-
-# If true, '()' will be appended to :func: etc. cross-reference text.
-#
-# add_function_parentheses = True
-
-# If true, the current module name will be prepended to all description
-# unit titles (such as .. function::).
-#
-# add_module_names = True
-
-# If true, sectionauthor and moduleauthor directives will be shown in the
-# output. They are ignored by default.
-#
-show_authors = True
-
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
-# A list of ignored prefixes for module index sorting.
-# modindex_common_prefix = []
-
-# If true, keep warnings as "system message" paragraphs in the built documents.
-# keep_warnings = False
-
# If true, `todo` and `todoList` produce output, else they produce nothing.
-todo_include_todos = False
+todo_include_todos = True
# -- Options for HTML output ----------------------------------------------
@@ -136,188 +100,71 @@
#
# html_theme_options = {}
-# Add any paths that contain custom themes here, relative to this directory.
-# html_theme_path = []
-
-# The name for this set of Sphinx documents.
-# " v documentation" by default.
-#
-# html_title = u'python-arango v3.0.0'
-
-# A shorter title for the navigation bar. Default is the same as html_title.
-#
-# html_short_title = None
-
-# The name of an image file (relative to this directory) to place at the top
-# of the sidebar.
-#
-# html_logo = None
-
-# The name of an image file (relative to this directory) to use as a favicon of
-# the docs. This file should be a Windows icon file (.ico) being 16x16 / 32x32
-# pixels large.
-#
-# html_favicon = None
-
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['static']
-# Add any extra paths that contain custom files (such as robots.txt or
-# .htaccess) here, relative to this directory. These files are copied
-# directly to the root of the documentation.
-#
-# html_extra_path = []
-
-# If not None, a 'Last updated on:' timestamp is inserted at every page
-# bottom, using the given strftime format.
-# The empty string is equivalent to '%b %d, %Y'.
-#
-# html_last_updated_fmt = None
-
-# If true, SmartyPants will be used to convert quotes and dashes to
-# typographically correct entities.
-#
-# html_use_smartypants = True
-
-# Custom sidebar templates, maps document names to template names.
-#
-# html_sidebars = {}
-
-# Additional templates that should be rendered to pages, maps page names to
-# template names.
-#
-# html_additional_pages = {}
-
-# If false, no module index is generated.
-#
-# html_domain_indices = True
-
-# If false, no index is generated.
-#
-# html_use_index = True
-
-# If true, the index is split into individual pages for each letter.
-#
-# html_split_index = False
-
-# If true, links to the reST sources are added to the pages.
-#
-# html_show_sourcelink = True
-
-# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
-#
-# html_show_sphinx = True
-
-# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
-#
-# html_show_copyright = True
-
-# If true, an OpenSearch description file will be output, and all pages will
-# contain a tag referring to it. The value of this option must be the
-# base URL from which the finished HTML is served.
-#
-# html_use_opensearch = ''
-
-# This is the file name suffix for HTML files (e.g. ".xhtml").
-# html_file_suffix = None
-
-# Language to be used for generating the HTML full-text search index.
-# Sphinx supports the following languages:
-# 'da', 'de', 'en', 'es', 'fi', 'fr', 'hu', 'it', 'ja'
-# 'nl', 'no', 'pt', 'ro', 'ru', 'sv', 'tr', 'zh'
-#
-# html_search_language = 'en'
+# Custom sidebar templates, must be a dictionary that maps document names
+# to template names.
+#
+# This is required for the alabaster theme
+# refs: http://alabaster.readthedocs.io/en/latest/installation.html#sidebars
+html_sidebars = {
+ '**': [
+ 'about.html',
+ 'navigation.html',
+ 'relations.html', # needs 'show_related': True theme option to display
+ 'searchbox.html',
+ 'donate.html',
+ ]
+}
-# A dictionary with options for the search language support, empty by default.
-# 'ja' uses this config value.
-# 'zh' user can custom change `jieba` dictionary path.
-#
-# html_search_options = {'type': 'default'}
-# The name of a javascript file (relative to the configuration directory) that
-# implements a search results scorer. If empty, the default will be used.
-#
-# html_search_scorer = 'scorer.js'
+# -- Options for HTMLHelp output ------------------------------------------
# Output file base name for HTML help builder.
htmlhelp_basename = 'python-arangodoc'
+
# -- Options for LaTeX output ---------------------------------------------
latex_elements = {
- # The paper size ('letterpaper' or 'a4paper').
- #
- # 'papersize': 'letterpaper',
+ # The paper size ('letterpaper' or 'a4paper').
+ #
+ # 'papersize': 'letterpaper',
- # The font size ('10pt', '11pt' or '12pt').
- #
- # 'pointsize': '10pt',
+ # The font size ('10pt', '11pt' or '12pt').
+ #
+ # 'pointsize': '10pt',
- # Additional stuff for the LaTeX preamble.
- #
- # 'preamble': '',
+ # Additional stuff for the LaTeX preamble.
+ #
+ # 'preamble': '',
- # Latex figure (float) alignment
- #
- # 'figure_align': 'htbp',
+ # Latex figure (float) alignment
+ #
+ # 'figure_align': 'htbp',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
- (master_doc, 'python-arango.tex', u'python-arango Documentation',
- u'Joohwan Oh', 'manual'),
+ (master_doc, 'python-arango.tex', 'python-arango Documentation',
+ 'Joohwan Oh', 'manual'),
]
-# The name of an image file (relative to this directory) to place at the top of
-# the title page.
-#
-# latex_logo = None
-
-# For "manual" documents, if this is true, then toplevel headings are parts,
-# not chapters.
-#
-# latex_use_parts = False
-
-# If true, show page references after internal links.
-#
-# latex_show_pagerefs = False
-
-# If true, show URL addresses after external links.
-#
-# latex_show_urls = False
-
-# Documents to append as an appendix to all manuals.
-#
-# latex_appendices = []
-
-# It false, will not define \strong, \code, itleref, \crossref ... but only
-# \sphinxstrong, ..., \sphinxtitleref, ... To help avoid clash with user added
-# packages.
-#
-# latex_keep_old_macro_names = True
-
-# If false, no module index is generated.
-#
-# latex_domain_indices = True
-
# -- Options for manual page output ---------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
- (master_doc, 'python-arango', u'python-arango Documentation',
+ (master_doc, 'python-arango', 'python-arango Documentation',
[author], 1)
]
-# If true, show URL addresses after external links.
-#
-# man_show_urls = False
-
# -- Options for Texinfo output -------------------------------------------
@@ -325,25 +172,68 @@
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
- (master_doc, 'python-arango', u'python-arango Documentation',
+ (master_doc, 'python-arango', 'python-arango Documentation',
author, 'python-arango', 'One line description of project.',
'Miscellaneous'),
]
-# Documents to append as an appendix to all manuals.
-#
-# texinfo_appendices = []
-
-# If false, no module index is generated.
-#
-# texinfo_domain_indices = True
-
-# How to display URL addresses: 'footnote', 'no', or 'inline'.
-#
-# texinfo_show_urls = 'footnote'
-
-# If true, do not generate a @detailmenu in the "Top" node's menu.
-#
-# texinfo_no_detailmenu = False
-
-autodoc_member_order = 'bysource'
\ No newline at end of file
+autodoc_member_order = 'bysource'
+
+doctest_global_setup = """
+from arango import ArangoClient
+
+# Initialize the ArangoDB client.
+client = ArangoClient()
+
+# Connect to "_system" database as root user.
+sys_db = client.db('_system', username='root', password='passwd')
+
+# Create "test" database if it does not exist.
+if not sys_db.has_database('test'):
+ sys_db.create_database('test')
+
+# Ensure that user "johndoe@gmail.com" does not exist.
+if sys_db.has_user('johndoe@gmail.com'):
+ sys_db.delete_user('johndoe@gmail.com')
+
+# Connect to "test" database as root user.
+db = client.db('test', username='root', password='passwd')
+
+# Create "students" collection if it does not exist.
+if db.has_collection('students'):
+ db.collection('students').truncate()
+else:
+ db.create_collection('students')
+
+# Ensure that "cities" collection does not exist.
+if db.has_collection('cities'):
+ db.delete_collection('cities')
+
+# Create "school" graph if it does not exist.
+if db.has_graph("school"):
+ school = db.graph('school')
+else:
+ school = db.create_graph('school')
+
+# Create "teachers" vertex collection if it does not exist.
+if school.has_vertex_collection('teachers'):
+ school.vertex_collection('teachers').truncate()
+else:
+ school.create_vertex_collection('teachers')
+
+# Create "lectures" vertex collection if it does not exist.
+if school.has_vertex_collection('lectures'):
+ school.vertex_collection('lectures').truncate()
+else:
+ school.create_vertex_collection('lectures')
+
+# Create "teach" edge definition if it does not exist.
+if school.has_edge_definition('teach'):
+ school.edge_collection('teach').truncate()
+else:
+ school.create_edge_definition(
+ edge_collection='teach',
+ from_vertex_collections=['teachers'],
+ to_vertex_collections=['lectures']
+ )
+"""
diff --git a/docs/contributing.rst b/docs/contributing.rst
new file mode 100644
index 00000000..a51bc4b3
--- /dev/null
+++ b/docs/contributing.rst
@@ -0,0 +1,120 @@
+Contributing
+------------
+
+Requirements
+============
+
+Before submitting a pull request on GitHub_, please make sure you meet the
+following **requirements**:
+
+* The pull request points to dev_ (development) branch.
+* All changes are squashed into a single commit (I like to use ``git rebase -i``
+ to do this).
+* The commit message is in present tense (good: "Add feature", bad:
+ "Added feature").
+* Correct and consistent style: Sphinx_-compatible docstrings, correct snake
+ and camel casing, and PEP8_ compliance (see below).
+* No classes/methods/functions with missing docstrings or commented-out lines.
+ You can take a look at the existing code in python-arango for examples.
+* The test coverage_ remains at 100%. Sometimes you may find yourself having to
+ write superfluous unit tests to keep this number up. If a piece of code is
+ trivial and has no need for unit tests, use this_ to exclude it from coverage
+ (see the short sketch after this list).
+* No build failures on TravisCI_. The builds automatically trigger on PR
+ submissions.
+* Does not break backward-compatibility (unless there is a really good reason).
+* Compatibility with all supported Python versions: 2.7, 3.4, 3.5 and 3.6.
+
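+As a short sketch of the coverage exclusion mentioned in the list above,
+trivial code can be skipped with coverage.py's standard ``# pragma: no cover``
+marker (the method below is purely hypothetical):
+
+.. code-block:: python
+
+ def __repr__(self):  # pragma: no cover
+     return '<ArangoClient>'
+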
+.. note::
+ The dev branch is occasionally rebased_, and its commit history may be
+ overwritten in the process. Before you begin feature work, run ``git fetch``
+ or ``git pull`` to ensure that your local branch has not diverged. If you see
+ git conflicts and want to start from scratch, run these commands:
+
+ .. code-block:: bash
+
+ ~$ git checkout dev
+ ~$ git fetch origin
+ ~$ git reset --hard origin/dev # THIS WILL WIPE ALL LOCAL CHANGES
+
+Style
+=====
+
+To ensure PEP8_ compliance, run flake8_:
+
+.. code-block:: bash
+
+ ~$ pip install flake8
+ ~$ git clone https://github.com/joowani/python-arango.git
+ ~$ cd python-arango
+ ~$ flake8
+
+You should try to resolve all issues reported. If there is a good reason to
+ignore errors from a specific piece of code, visit here_ to see how to exclude
+the lines.
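+
+For instance, a specific violation can be silenced with an in-line ``# noqa``
+comment (a minimal, illustrative snippet):
+
+.. code-block:: python
+
+ import os, sys  # noqa: E401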
+
+Testing
+=======
+
+To test your changes, run the integration test suite that comes with
+**python-arango** on your local machine. The test suite uses pytest_, and is
+designed to run against an actual database instance. Please use the latest
+version of ArangoDB with the following configuration:
+
+* **Host**: "localhost"
+* **Port**: 8529
+* **Username**: "root"
+* **Password**: "passwd"
+
+To run the test suite:
+
+.. code-block:: bash
+
+ ~$ pip install pytest
+ ~$ git clone https://github.com/joowani/python-arango.git
+ ~$ cd python-arango
+ ~$ py.test --complete --verbose
+
+To run the test suite with coverage report:
+
+.. code-block:: bash
+
+ ~$ pip install coverage pytest pytest-cov
+ ~$ git clone https://github.com/joowani/python-arango.git
+ ~$ cd python-arango
+ ~$ py.test --complete --verbose --cov=arango --cov-report=html
+
+ # Open the generated file htmlcov/index.html in a browser
+
+The test suite only operates on temporary databases created during the run,
+and is meant to be run in disposable environments (e.g. Travis).
+
+Documentation
+=============
+
+The documentation (including the README) is written in reStructuredText_ and
+uses Sphinx_. To build the HTML version of the documentation on your local
+machine:
+
+.. code-block:: bash
+
+ ~$ pip install sphinx sphinx_rtd_theme
+ ~$ git clone https://github.com/joowani/python-arango.git
+ ~$ cd python-arango/docs
+ ~$ sphinx-build . build
+ ~$ # Open the generated file build/index.html in a browser
+
+
+As always, thanks for your contribution!
+
+.. _rebased: https://git-scm.com/book/en/v2/Git-Branching-Rebasing
+.. _dev: https://github.com/joowani/python-arango/tree/dev
+.. _GitHub: https://github.com/joowani/python-arango
+.. _PEP8: https://www.python.org/dev/peps/pep-0008/
+.. _coverage: https://coveralls.io/github/joowani/python-arango
+.. _this: http://coverage.readthedocs.io/en/latest/excluding.html
+.. _TravisCI: https://travis-ci.org/joowani/python-arango
+.. _Sphinx: https://github.com/sphinx-doc/sphinx
+.. _flake8: http://flake8.pycqa.org
+.. _here: http://flake8.pycqa.org/en/latest/user/violations.html#in-line-ignoring-errors
+.. _pytest: https://github.com/pytest-dev/pytest
+.. _reStructuredText: https://en.wikipedia.org/wiki/ReStructuredText
diff --git a/docs/cursor.rst b/docs/cursor.rst
index 4e76f8f9..f5c747ef 100644
--- a/docs/cursor.rst
+++ b/docs/cursor.rst
@@ -1,62 +1,157 @@
-.. _cursor-page:
-
Cursors
-------
-Several operations provided by python-arango (e.g. :ref:`aql-page` queries)
-return query :ref:`Cursor` objects to batch the network communication between
-the server and the client. Each request from the cursor fetches the next set
-of documents. Depending on the query, the total number of documents in a result
-set may or may not be known in advance.
-
-.. note::
- In order to free the server resources as much as possible, python-arango
- deletes cursors as soon as their result sets are depleted.
+Many operations provided by python-arango (e.g. executing :doc:`aql` queries)
+return result **cursors** to batch the network communication between the
+ArangoDB server and the python-arango client. Each HTTP request from a cursor
+fetches the next batch of results (usually documents). Depending on the query,
+the total number of items in the result set may or may not be known in advance.
-Here is an example showing how a query cursor can be used:
+**Example:**
-.. code-block:: python
+.. testcode::
from arango import ArangoClient
+ # Initialize the ArangoDB client.
client = ArangoClient()
- db = client.db('my_database')
- # Set up some test data to query
+ # Connect to "test" database as root user.
+ db = client.db('test', username='root', password='passwd')
+
+ # Set up some test data to query against.
db.collection('students').insert_many([
{'_key': 'Abby', 'age': 22},
{'_key': 'John', 'age': 18},
- {'_key': 'Mary', 'age': 21}
+ {'_key': 'Mary', 'age': 21},
+ {'_key': 'Suzy', 'age': 23},
+ {'_key': 'Dave', 'age': 20}
])
- # Execute an AQL query which returns a cursor object
+ # Execute an AQL query which returns a cursor object.
cursor = db.aql.execute(
- 'FOR s IN students FILTER s.age < @val RETURN s',
- bind_vars={'val': 19},
- batch_size=1,
+ 'FOR doc IN students FILTER doc.age > @val RETURN doc',
+ bind_vars={'val': 17},
+ batch_size=2,
count=True
)
- # Retrieve the cursor ID
+ # Get the cursor ID.
cursor.id
- # Retrieve the documents in the current batch
+ # Get the items in the current batch.
cursor.batch()
- # Check if there are more documents to be fetched
+ # Check if the current batch is empty.
+ cursor.empty()
+
+ # Get the total count of the result set.
+ cursor.count()
+
+ # Flag indicating if there are more to be fetched from server.
cursor.has_more()
- # Retrieve the cursor statistics
+ # Flag indicating if the results are cached.
+ cursor.cached()
+
+ # Get the cursor statistics.
cursor.statistics()
- # Retrieve any warnings produced from the cursor
+ # Get the performance profile.
+ cursor.profile()
+
+ # Get any warnings produced from the query.
cursor.warnings()
- # Return the next document in the batch
- # If the batch is depleted, fetch the next batch
+ # Return the next item from the cursor. If current batch is depleted, the
+ # next batch is fetched from the server automatically.
cursor.next()
- # Delete the cursor from the server
+ # Return the next item from the cursor. If current batch is depleted, an
+ # exception is thrown. You need to fetch the next batch manually.
+ cursor.pop()
+
+ # Fetch the next batch and add it to the cursor object.
+ cursor.fetch()
+
+ # Delete the cursor from the server.
cursor.close()
-Refer to :ref:`Cursor` class for more details.
\ No newline at end of file
+See :ref:`Cursor` for API specification.
+
+If the fetched result batch is depleted while you are iterating over a cursor
+(or while calling the method :func:`arango.cursor.Cursor.next`), python-arango
+automatically sends an HTTP request to the server to fetch the next batch
+(just-in-time style). To control exactly when the fetches occur, you can use
+methods :func:`arango.cursor.Cursor.fetch` and :func:`arango.cursor.Cursor.pop`
+instead.
+
+**Example:**
+
+.. testcode::
+
+ from arango import ArangoClient
+
+ # Initialize the ArangoDB client.
+ client = ArangoClient()
+
+ # Connect to "test" database as root user.
+ db = client.db('test', username='root', password='passwd')
+
+ # Set up some test data to query against.
+ db.collection('students').insert_many([
+ {'_key': 'Abby', 'age': 22},
+ {'_key': 'John', 'age': 18},
+ {'_key': 'Mary', 'age': 21}
+ ])
+
+ # If you iterate over the cursor or call cursor.next(), batches are
+ # fetched automatically from the server just-in-time style.
+ cursor = db.aql.execute('FOR doc IN students RETURN doc', batch_size=1)
+ result = [doc for doc in cursor]
+
+ # Alternatively, you can manually fetch and pop for finer control.
+ cursor = db.aql.execute('FOR doc IN students RETURN doc', batch_size=1)
+ while cursor.has_more(): # Fetch until nothing is left on the server.
+ cursor.fetch()
+ while not cursor.empty(): # Pop until nothing is left on the cursor.
+ cursor.pop()
+
+When running queries in :doc:`transactions <transaction>`, cursors are loaded
+with the entire result set right away, regardless of the parameters passed in
+when executing the query (e.g. ``batch_size``). Be mindful of client-side
+memory capacity when executing queries that can potentially return a large
+result set.
+
+**Example:**
+
+.. testcode::
+
+ # Initialize the ArangoDB client.
+ client = ArangoClient()
+
+ # Connect to "test" database as root user.
+ db = client.db('test', username='root', password='passwd')
+
+ # Get the total document count in "students" collection.
+ document_count = db.collection('students').count()
+
+ # Execute an AQL query normally (without using transactions).
+ cursor1 = db.aql.execute('FOR doc IN students RETURN doc', batch_size=1)
+
+ # Execute the same AQL query in a transaction.
+ txn_db = db.begin_transaction()
+ job = txn_db.aql.execute('FOR doc IN students RETURN doc', batch_size=1)
+ txn_db.commit()
+ cursor2 = job.result()
+
+ # The first cursor acts as expected. Its current batch contains only 1 item
+ # and it still needs to fetch the rest of its result set from the server.
+ assert len(cursor1.batch()) == 1
+ assert cursor1.has_more() is True
+
+ # The second cursor is pre-loaded with the entire result set, and does not
+ # require further communication with ArangoDB server. Note that value of
+ # parameter "batch_size" was ignored.
+ assert len(cursor2.batch()) == document_count
+ assert cursor2.has_more() is False
diff --git a/docs/database.rst b/docs/database.rst
index d7ef5da0..901f644e 100644
--- a/docs/database.rst
+++ b/docs/database.rst
@@ -1,51 +1,62 @@
-.. _database-page:
-
Databases
---------
-A single ArangoDB instance can house multiple databases, which in turn contain
-their own set of worker processes, :ref:`collections `, and
-:ref:`graphs `. There is always a default database named ``_system``.
-This default database cannot be dropped, can only be accessed with root
-privileges, and provides operations for managing other user-defined databases.
+An ArangoDB server can have an arbitrary number of **databases**. Each database
+has its own set of :doc:`collections <collection>` and :doc:`graphs <graph>`.
+There is a *special* database named ``_system``, which always exists by default
+and cannot be dropped. This database provides administrative operations such as
+managing users, permissions and other databases. Many of these operations can
+only be executed by admin users. See :doc:`user` for more information.
-Here is an example showing how databases can be managed with multiple users:
+**Example:**
-.. code-block:: python
+.. testcode::
from arango import ArangoClient
- # Initialize the ArangoDB client as root
- client = ArangoClient(username='root', password='')
-
- # Create a database, again as root (the user is inherited from client
- # initialization if the username and password are not specified)
- db = client.create_database('my_database', username=None, password=None)
-
- # Retrieve the properties of the new database
- db.properties()
-
- # Create another database, this time with a predefined set of users
- db = client.create_database(
- name='another_database',
- # Users jane, john and jake will have access to the new database
- users=[
- {'username': 'jane', 'password': 'foo', 'active': True},
- {'username': 'john', 'password': 'bar', 'active': True},
- {'username': 'jake', 'password': 'baz', 'active': True},
- ],
- # API calls through this database object uses jake's credentials
- username='jake',
- password='baz'
- )
-
- # To switch to a different user, simply create a new database object with
- # the credentials of the desired user (which in this case would be jane's)
- db = client.database('another_database', username='jane', password='foo')
-
- # Delete an existing database as root
- client.delete_database('another_database')
-
-Refer to :ref:`ArangoClient` and :ref:`Database` classes for more details
-on database management, and the :ref:`user-page` page for more details on user
-management and database access control.
+ # Initialize the ArangoDB client.
+ client = ArangoClient(protocol='http', host='localhost', port=8529)
+
+ # Connect to "_system" database as root user.
+ # This returns an API wrapper for "_system" database.
+ sys_db = client.db('_system', username='root', password='passwd')
+
+ # List all databases.
+ sys_db.databases()
+
+ # Create a new database named "test" if it does not exist.
+ # Only root user has access to it at time of its creation.
+ if not sys_db.has_database('test'):
+ sys_db.create_database('test')
+
+ # Delete the database.
+ sys_db.delete_database('test')
+
+ # Create a new database named "test" along with a new set of users.
+ # Only "jane", "john", "jake" and root user have access to it.
+ if not sys_db.has_database('test'):
+ sys_db.create_database(
+ name='test',
+ users=[
+ {'username': 'jane', 'password': 'foo', 'active': True},
+ {'username': 'john', 'password': 'bar', 'active': True},
+ {'username': 'jake', 'password': 'baz', 'active': True},
+ ],
+ )
+
+ # Connect to the new "test" database as user "jane".
+ db = client.db('test', username='jane', password='foo')
+
+ # Retrieve various database and server information.
+ db.name
+ db.username
+ db.version()
+ db.details()
+ db.collections()
+ db.graphs()
+ db.engine()
+
+ # Delete the database. Note that the new users will remain.
+ sys_db.delete_database('test')
+
+See :ref:`ArangoClient` and :ref:`StandardDatabase` for API specification.
diff --git a/docs/document.rst b/docs/document.rst
index d40da26b..4702c9ca 100644
--- a/docs/document.rst
+++ b/docs/document.rst
@@ -1,118 +1,202 @@
-.. _document-page:
-
Documents
---------
-**Documents** in python-arango are Python dictionaries. They can be nested to
-an arbitrary depth and contain lists. Each document must have the ``"_key"``
-field, whose value identifies the document uniquely within a collection. There
-is also the ``"_id"`` field, whose value identifies the document uniquely across
-*all* collections within a database.
+In python-arango, a **document** is a Python dictionary with the following
+properties:
+
+* Is JSON serializable.
+* May be nested to an arbitrary depth.
+* May contain lists.
+* Contains the ``_key`` field, which identifies the document uniquely within a
+ specific collection.
+* Contains the ``_id`` field (also called the *handle*), which identifies the
+ document uniquely across all collections within a database. This ID is a
+ combination of the collection name and the document key using the format
+ ``{collection}/{key}`` (see example below).
+* Contains the ``_rev`` field. ArangoDB supports MVCC (Multi-Version
+  Concurrency Control) and is capable of storing each document in multiple
+  revisions. The latest revision of a document is indicated by this field. The
+  field is populated by ArangoDB and is not required as input unless you want
+  to validate a document against its current revision (see the sketch below).
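+
+For instance, here is a minimal sketch of revision validation. It assumes a
+"students" collection like the one below, and the ``check_rev`` parameter of
+the update method:
+
+.. code-block:: python
+
+    from arango import ArangoClient
+    from arango.exceptions import DocumentRevisionError
+
+    client = ArangoClient()
+    db = client.db('test', username='root', password='passwd')
+    students = db.collection('students')
+
+    # The fetched document carries the current "_rev" value.
+    student = students.get('bruce')
+    student['is_rich'] = False
+
+    try:
+        # With check_rev=True, "_rev" is validated against the server side.
+        students.update(student, check_rev=True)
+    except DocumentRevisionError:
+        pass  # The document was modified by someone else in the meantime.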
-ArangoDB supports MVCC (Multiple Version Concurrency Control) and is capable
-of storing each document in multiple revisions. The revision of a document is
-distinguished by the value of the ``"_rev"`` field. For more information on
-documents and their associated terminologies visit this
-`page `__.
+For more information on documents and associated terminology, refer to
+`ArangoDB manual`_. Here is an example of a valid document in "students"
+collection:
-Here is an example of a valid document:
+.. _ArangoDB manual: https://docs.arangodb.com
-.. code-block:: python
+.. testcode::
{
- '_id': 'students/john',
- '_key': 'john',
- '_rev': '14253647',
- 'first_name': 'John',
- 'last_name': 'Doe',
+ '_id': 'students/bruce',
+ '_key': 'bruce',
+ '_rev': '_Wm3dzEi--_',
+ 'first_name': 'Bruce',
+ 'last_name': 'Wayne',
'address': {
+ 'street' : '1007 Mountain Dr.',
'city': 'Gotham',
- 'zip': 'M1NS93',
- 'street' : '300 Beverly St.'
+ 'state': 'NJ'
},
- 'courses': ['CSC101', 'STA101']
+ 'is_rich': True,
+ 'friends': ['robin', 'gordon']
}
.. _edge-documents:
-**Edge documents** or **edges** are similar to documents but with additional
-required fields ``"_from"`` and ``"_to"``. The values of these fields are the
-values of the ``"_id"`` field of the "from" and "to" vertex documents (see
-:ref:`graphs ` for more details). Edge documents are contained in
-:ref:`edge collections `.
-
-Here is an example of a valid edge document:
+**Edge documents (edges)** are similar to standard documents but with two
+additional required fields ``_from`` and ``_to``. Values of these fields must
+be the handles of "from" and "to" vertex documents linked by the edge document
+in question (see :doc:`graph` for details). Edge documents are contained in
+:ref:`edge collections <edge-collections>`. Here is an example of a valid edge
+document in "friends" edge collection:
-.. code-block:: python
+.. testcode::
{
- '_id': 'knows/001',
+ '_id': 'friends/001',
'_key': '001',
- '_rev': '23891346',
+ '_rev': '_Wm3dyle--_',
'_from': 'students/john',
'_to': 'students/jane',
- 'friends': True,
- 'family': False
+ 'closeness': 9.5
}
+Standard documents are managed via collection API wrapper:
-Here is an example showing how documents can be managed:
-
-.. code-block:: python
+.. testcode::
from arango import ArangoClient
+ # Initialize the ArangoDB client.
client = ArangoClient()
- db = client.db('my_database')
+
+ # Connect to "test" database as root user.
+ db = client.db('test', username='root', password='passwd')
+
+ # Get the API wrapper for "students" collection.
students = db.collection('students')
+ # Create some test documents to play around with.
lola = {'_key': 'lola', 'GPA': 3.5, 'first': 'Lola', 'last': 'Martin'}
abby = {'_key': 'abby', 'GPA': 3.2, 'first': 'Abby', 'last': 'Page'}
john = {'_key': 'john', 'GPA': 3.6, 'first': 'John', 'last': 'Kim'}
emma = {'_key': 'emma', 'GPA': 4.0, 'first': 'Emma', 'last': 'Park'}
- # Insert a new document
- result = students.insert(lola)
- print(result['_id'], result['_key'], result['_rev'])
+ # Insert a new document. This returns the document metadata.
+ metadata = students.insert(lola)
+ assert metadata['_id'] == 'students/lola'
+ assert metadata['_key'] == 'lola'
+
+ # Check if documents exist in the collection in multiple ways.
+ assert students.has('lola') and 'john' not in students
- # Retrieve document information
- students.has('lola') and 'john' in students
- students.count()
- len(students) > 5
+ # Retrieve the total document count in multiple ways.
+ assert students.count() == len(students) == 1
- # Insert multiple documents in bulk
+ # Insert multiple documents in bulk.
students.import_bulk([abby, john, emma])
- # Retrieve one or more matching documents
+ # Retrieve one or more matching documents.
for student in students.find({'first': 'John'}):
- print(student['_key'], student['GPA'])
+ assert student['_key'] == 'john'
+ assert student['GPA'] == 3.6
+ assert student['last'] == 'Kim'
- # Retrieve a single document
+ # Retrieve a document by key.
students.get('john')
- # Retrieve multiple documents
- students.get_many(['abby', 'lola'])
+ # Retrieve a document by ID.
+ students.get('students/john')
+
+ # Retrieve a document by body with "_id" field.
+ students.get({'_id': 'students/john'})
+
+ # Retrieve a document by body with "_key" field.
+ students.get({'_key': 'john'})
- # Update a single document
+ # Retrieve multiple documents by ID, key or body.
+ students.get_many(['abby', 'students/lola', {'_key': 'john'}])
+
+ # Update a single document.
lola['GPA'] = 2.6
students.update(lola)
- # Update one or more matching documents
+ # Update one or more matching documents.
students.update_match({'last': 'Park'}, {'GPA': 3.0})
- # Replace documents by filters
+ # Replace a single document.
+ emma['GPA'] = 3.1
+ students.replace(emma)
+
+ # Replace one or more matching documents.
becky = {'first': 'Becky', 'last': 'Solis', 'GPA': '3.3'}
students.replace_match({'first': 'Emma'}, becky)
- # Replace a single document
- emma['GPA'] = 3.1
- students.replace(emma)
+ # Delete a document by key.
+ students.delete('john')
+
+ # Delete a document by ID.
+ students.delete('students/lola')
+
+ # Delete a document by body with "_id" or "_key" field.
+ students.delete(emma)
- # Iterate through all documents and update
+ # Delete multiple documents. Missing ones are ignored.
+ students.delete_many([abby, 'john', 'students/lola'])
+
+ # Iterate through all documents and update individually.
for student in students:
student['GPA'] = 4.0
student['happy'] = True
students.update(student)
-Refer to :ref:`Collection` class for more details on the operations shown
-above.
+You can also manage documents via database API wrappers, but only simple
+operations (i.e. get, insert, update, replace, delete) are supported, and you
+must provide document IDs instead of keys:
+
+.. testcode::
+
+ from arango import ArangoClient
+
+ # Initialize the ArangoDB client.
+ client = ArangoClient()
+
+ # Connect to "test" database as root user.
+ db = client.db('test', username='root', password='passwd')
+
+ # Create some test documents to play around with.
+ # The documents must have the "_id" field instead.
+ lola = {'_id': 'students/lola', 'GPA': 3.5}
+ abby = {'_id': 'students/abby', 'GPA': 3.2}
+ john = {'_id': 'students/john', 'GPA': 3.6}
+ emma = {'_id': 'students/emma', 'GPA': 4.0}
+
+ # Insert a new document.
+ metadata = db.insert_document('students', lola)
+ assert metadata['_id'] == 'students/lola'
+ assert metadata['_key'] == 'lola'
+
+ # Check if a document exists.
+ assert db.has_document(lola) is True
+
+ # Get a document (by ID or body with "_id" field).
+ db.document('students/lola')
+ db.document(abby)
+
+ # Update a document.
+ lola['GPA'] = 3.6
+ db.update_document(lola)
+
+ # Replace a document.
+ lola['GPA'] = 3.4
+ db.replace_document(lola)
+
+ # Delete a document (by ID or body with "_id" field).
+ db.delete_document('students/lola')
+
+See :ref:`StandardDatabase` and :ref:`StandardCollection` for API specification.
+
+When managing documents, collection API wrappers are recommended over database
+API wrappers, as more operations are available and less sanity checking is
+performed under the hood (see the sketch below).
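+
+As a quick side-by-side sketch (assuming the ``db`` wrapper and "students"
+collection from the examples above):
+
+.. code-block:: python
+
+    # Collection API wrapper: keys, IDs or bodies are accepted.
+    students = db.collection('students')
+    students.get('lola')
+
+    # Database API wrapper: full document IDs are required.
+    db.document('students/lola')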
diff --git a/docs/errors.rst b/docs/errors.rst
index 82e2ad0e..b511102a 100644
--- a/docs/errors.rst
+++ b/docs/errors.rst
@@ -2,36 +2,118 @@ Error Handling
--------------
All python-arango exceptions inherit :class:`arango.exceptions.ArangoError`,
-which lightly wraps around the HTTP error responses returned from ArangoDB.
-The majority of the error messages in the exceptions raised by python-arango
-come directly from the server.
+which splits into subclasses :class:`arango.exceptions.ArangoServerError` and
+:class:`arango.exceptions.ArangoClientError`.
-Here is an example showing how a python-arango exception can be handled:
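+
+If you simply want a catch-all, you can still handle the base class directly.
+Here is a minimal sketch (assuming a "students" collection wrapper as in the
+examples below):
+
+.. code-block:: python
+
+    from arango import ArangoError
+
+    try:
+        students.insert({'_key': 'John'})
+    except ArangoError as exc:
+        # Catches both server and client errors.
+        print(exc.message)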
+Server Errors
+=============
-.. code-block:: python
+:class:`arango.exceptions.ArangoServerError` exceptions lightly wrap non-2xx
+HTTP responses coming from ArangoDB. Each exception contains the error message,
+error code, and HTTP request and response details.
- from arango import ArangoClient, ArangoError
+**Example:**
+.. testcode::
+
+ from arango import ArangoClient, ArangoServerError, DocumentInsertError
+
+ # Initialize the ArangoDB client.
client = ArangoClient()
- db = client.db('my_database')
+
+ # Connect to "test" database as root user.
+ db = client.db('test', username='root', password='passwd')
+
+ # Get the API wrapper for "students" collection.
students = db.collection('students')
try:
students.insert({'_key': 'John'})
- students.insert({'_key': 'John'}) # unique constraint violation
- except ArangoError as exc:
- print(repr(exc))
- print(exc.message)
- print(exc.error_code)
- print(exc.url)
- print(exc.http_method)
- print(exc.http_code)
- print(exc.http_headers)
+ students.insert({'_key': 'John'}) # duplicate key error
+
+ except DocumentInsertError as exc:
+
+ assert isinstance(exc, ArangoServerError)
+ assert exc.source == 'server'
+
+ exc.message # Exception message usually from ArangoDB
+ exc.error_message # Raw error message from ArangoDB
+ exc.error_code # Error code from ArangoDB
+ exc.url # URL (API endpoint)
+ exc.http_method # HTTP method (e.g. "POST")
+ exc.http_headers # Response headers
+ exc.http_code # Status code (e.g. 200)
+
+ # You can inspect the ArangoDB response directly.
+ response = exc.response
+ response.method # HTTP method (e.g. "POST")
+ response.headers # Response headers
+ response.url # Full request URL
+ response.is_success # Set to True if HTTP code is 2XX
+ response.body # JSON-deserialized response body
+ response.raw_body # Raw string response body
+        response.status_text   # Status text (e.g. "OK")
+ response.status_code # Status code (e.g. 200)
+ response.error_code # Error code from ArangoDB
+
+ # You can also inspect the request sent to ArangoDB.
+ request = exc.request
+ request.method # HTTP method (e.g. "post")
+ request.endpoint # API endpoint starting with "/_api"
+ request.headers # Request headers
+ request.params # URL parameters
+ request.data # Request payload
+ request.read # Read collections (used for transactions only)
+ request.write # Write collections (used for transactions only)
+ request.command # ArangoSh command (used for transactions only)
+
+See :ref:`Response` and :ref:`Request` for reference.
+
+Client Errors
+=============
+
+:class:`arango.exceptions.ArangoClientError` exceptions originate from the
+python-arango client itself. They do not contain error codes or HTTP request
+and response details.
+
+**Example:**
+
+.. testcode::
+
+ from arango import ArangoClient, ArangoClientError, DocumentParseError
+
+ # Initialize the ArangoDB client.
+ client = ArangoClient()
+
+ # Connect to "test" database as root user.
+ db = client.db('test', username='root', password='passwd')
+
+ # Get the API wrapper for "students" collection.
+ students = db.collection('students')
+
+ try:
+ students.get({'_id': 'invalid_id'}) # malformed document
+
+ except DocumentParseError as exc:
+
+ assert isinstance(exc, ArangoClientError)
+ assert exc.source == 'client'
+
+ # Only the error message is set.
+ error_message = exc.message
+ assert exc.error_code is None
+ assert exc.error_message is None
+ assert exc.url is None
+ assert exc.http_method is None
+ assert exc.http_code is None
+ assert exc.http_headers is None
+ assert exc.response is None
+ assert exc.request is None
Exceptions
==========
-Below are all exceptions available in python-arango.
+Below are all exceptions from python-arango.
.. automodule:: arango.exceptions
:members:
diff --git a/docs/foxx.rst b/docs/foxx.rst
new file mode 100644
index 00000000..92baaeb2
--- /dev/null
+++ b/docs/foxx.rst
@@ -0,0 +1,107 @@
+Foxx
+----
+
+Python-arango supports **Foxx**, a microservice framework which lets you define
+custom HTTP endpoints to extend ArangoDB's REST API. For more information, refer
+to `ArangoDB manual`_.
+
+.. _ArangoDB manual: https://docs.arangodb.com
+
+**Example:**
+
+.. testsetup::
+
+ import os
+ import sys
+
+ if os.getenv('TRAVIS', False):
+ service_file = os.path.join(os.sep, 'tmp', 'service.zip')
+ else:
+ cwd = os.getcwd()
+ if cwd.endswith('docs'):
+ cwd = cwd[:-4]
+ service_file = os.path.join(cwd, 'tests', 'static', 'service.zip')
+
+.. testcode::
+
+ from arango import ArangoClient
+
+ # Initialize the ArangoDB client.
+ client = ArangoClient()
+
+ # Connect to "_system" database as root user.
+ db = client.db('_system', username='root', password='passwd')
+
+ # Get the Foxx API wrapper.
+ foxx = db.foxx
+
+ # Define the test mount point.
+ service_mount = '/test_mount'
+
+ # List services.
+ foxx.services()
+
+ # Create a service.
+ foxx.create_service(
+ mount=service_mount,
+ source=service_file, # "/home/example/services.zip"
+ config={},
+ dependencies={},
+ development=True,
+ setup=True,
+ legacy=True
+ )
+
+ # Update (upgrade) a service.
+ service = db.foxx.update_service(
+ mount=service_mount,
+ source=service_file,
+ config={},
+ dependencies={},
+ teardown=True,
+ setup=True,
+ legacy=False
+ )
+
+ # Replace (overwrite) a service.
+ service = db.foxx.replace_service(
+ mount=service_mount,
+ source=service_file,
+ config={},
+ dependencies={},
+ teardown=True,
+ setup=True,
+ legacy=True,
+ force=False
+ )
+
+ # Get service details.
+ foxx.service(service_mount)
+
+ # Manage service configuration.
+ foxx.config(service_mount)
+ foxx.update_config(service_mount, config={})
+ foxx.replace_config(service_mount, config={})
+
+ # Manage service dependencies.
+ foxx.dependencies(service_mount)
+ foxx.update_dependencies(service_mount, dependencies={})
+ foxx.replace_dependencies(service_mount, dependencies={})
+
+ # Toggle development mode for a service.
+ foxx.enable_development(service_mount)
+ foxx.disable_development(service_mount)
+
+ # Other miscellaneous functions.
+ foxx.readme(service_mount)
+ foxx.swagger(service_mount)
+ foxx.download(service_mount)
+ foxx.commit(service_mount)
+ foxx.scripts(service_mount)
+ foxx.run_script(service_mount, 'setup', [])
+ foxx.run_tests(service_mount, reporter='xunit', output_format='xml')
+
+ # Delete a service.
+ foxx.delete_service(service_mount)
+
+See :ref:`Foxx` for API specification.
diff --git a/docs/graph.rst b/docs/graph.rst
index 08e30caa..fa227b64 100644
--- a/docs/graph.rst
+++ b/docs/graph.rst
@@ -1,143 +1,292 @@
-.. _graph-page:
-
Graphs
------
-A **graph** consists of **vertices** and **edges**. Edges are stored as
-documents in :ref:`edge collections `, whereas vertices
-are stored as documents in :ref:`vertex collections `
-(edges can be vertices also). The combination of edge and vertex collections
-used in a graph is specified in :ref:`edge definitions `.
-For more information on graphs, vertices and edges visit this
-`page `__.
+A **graph** consists of vertices and edges. Vertices are stored as documents in
+:ref:`vertex collections <vertex-collections>` and edges are stored as documents
+in :ref:`edge collections <edge-collections>`. The collections used in a graph
+and their relations are specified with :ref:`edge definitions <edge-definitions>`.
+For more information, refer to `ArangoDB manual`_.
+
+.. _ArangoDB manual: https://docs.arangodb.com
-Here is an example showing how a graph can be created or deleted:
+**Example:**
-.. code-block:: python
+.. testcode::
from arango import ArangoClient
+ # Initialize the ArangoDB client.
client = ArangoClient()
- db = client.db('my_database')
- # List existing graphs
+ # Connect to "test" database as root user.
+ db = client.db('test', username='root', password='passwd')
+
+ # List existing graphs in the database.
db.graphs()
- # Create a new graph
- schedule = db.create_graph('schedule')
+ # Create a new graph named "school" if it does not already exist.
+ # This returns an API wrapper for "school" graph.
+ if db.has_graph('school'):
+ school = db.graph('school')
+ else:
+ school = db.create_graph('school')
+
+ # Retrieve various graph properties.
+ school.name
+ school.db_name
+ school.vertex_collections()
+ school.edge_definitions()
+
+ # Delete the graph.
+ db.delete_graph('school')
+
+.. _edge-definitions:
+
+Edge Definitions
+================
+
+An **edge definition** specifies a directed relation in a graph. A graph can
+have an arbitrary number of edge definitions. Each edge definition consists of
+the following components:
+
+* **From Vertex Collections:** contain "from" vertices referencing "to" vertices.
+* **To Vertex Collections:** contain "to" vertices referenced by "from" vertices.
+* **Edge Collection:** contains edges that link "from" and "to" vertices.
+
+Here is an example body of an edge definition:
+
+.. testcode::
- # Retrieve the graph properties
- schedule.properties()
+ {
+ 'edge_collection': 'teach',
+ 'from_vertex_collections': ['teachers'],
+ 'to_vertex_collections': ['lectures']
+ }
- # Delete an existing graph
- db.delete_graph('schedule')
+Here is an example showing how edge definitions are managed:
+
+.. testcode::
+
+ from arango import ArangoClient
+
+ # Initialize the ArangoDB client.
+ client = ArangoClient()
+
+ # Connect to "test" database as root user.
+ db = client.db('test', username='root', password='passwd')
+
+ # Get the API wrapper for graph "school".
+ if db.has_graph('school'):
+ school = db.graph('school')
+ else:
+ school = db.create_graph('school')
+
+ # Create an edge definition named "teach". This creates any missing
+ # collections and returns an API wrapper for "teach" edge collection.
+ if not school.has_edge_definition('teach'):
+ teach = school.create_edge_definition(
+ edge_collection='teach',
+ from_vertex_collections=['teachers'],
+ to_vertex_collections=['teachers']
+ )
+
+ # List edge definitions.
+ school.edge_definitions()
+
+ # Replace the edge definition.
+ school.replace_edge_definition(
+ edge_collection='teach',
+ from_vertex_collections=['teachers'],
+ to_vertex_collections=['lectures']
+ )
+
+ # Delete the edge definition (and its collections).
+ school.delete_edge_definition('teach', purge=True)
.. _vertex-collections:
Vertex Collections
==================
-A **vertex collection** consists of vertex documents. It is uniquely identified
-by its name, which must consist only of alphanumeric characters, hyphen and
-the underscore characters. Vertex collections share their namespace with other
-types of collections.
+A **vertex collection** contains vertex documents, and shares its namespace
+with all other types of collections. Each graph can have an arbitrary number of
+vertex collections. Vertex collections that are not part of any edge definition
+are called **orphan collections**. You can manage vertex documents via standard
+collection API wrappers, but using vertex collection API wrappers provides
+additional safeguards:
-The documents in a vertex collection are fully accessible from a standard
-collection. Managing documents through a vertex collection, however, adds
-additional safeguards: all modifications are executed in transactions, and
-if a vertex is deleted, all connected edges are also automatically deleted.
+* All modifications are executed in transactions.
+* If a vertex is deleted, all connected edges are also automatically deleted.
-Here is an example showing how vertex collections and vertices can be used:
+**Example:**
-.. code-block:: python
+.. testcode::
from arango import ArangoClient
+ # Initialize the ArangoDB client.
client = ArangoClient()
- db = client.db('my_database')
- schedule = db.graph('schedule')
- # Create a new vertex collection
- profs = schedule.create_vertex_collection('profs')
+ # Connect to "test" database as root user.
+ db = client.db('test', username='root', password='passwd')
- # List orphan vertex collections (without edges)
- schedule.orphan_collections()
+ # Get the API wrapper for graph "school".
+ school = db.graph('school')
- # List existing vertex collections
- schedule.vertex_collections()
+ # Create a new vertex collection named "teachers" if it does not exist.
+ # This returns an API wrapper for "teachers" vertex collection.
+ if school.has_vertex_collection('teachers'):
+ teachers = school.vertex_collection('teachers')
+ else:
+ teachers = school.create_vertex_collection('teachers')
- # Retrieve an existing vertex collection
- profs = schedule.vertex_collection('profs')
+ # List vertex collections in the graph.
+ school.vertex_collections()
- # Vertex collections have a similar interface to standard collections
- profs.insert({'_key': 'donald', 'name': 'Professor Donald'})
- profs.get('donald')
- profs.properties()
+    # Vertex collections have a similar interface to standard collections.
+ teachers.properties()
+ teachers.insert({'_key': 'jon', 'name': 'Jon'})
+ teachers.update({'_key': 'jon', 'age': 35})
+ teachers.replace({'_key': 'jon', 'name': 'Jon', 'age': 36})
+ teachers.get('jon')
+ teachers.has('jon')
+ teachers.delete('jon')
- # Delete an existing vertex collection
- schedule.delete_vertex_collection('profs', purge=True)
+You can also manage vertices via graph API wrappers, but you must use document
+IDs instead of keys where applicable.
-Refer to :ref:`Graph` and :ref:`VertexCollection` classes for more details.
+**Example:**
-.. _edge-definitions:
+.. testcode::
-Edge Definitions
-================
+ # Initialize the ArangoDB client.
+ client = ArangoClient()
+
+ # Connect to "test" database as root user.
+ db = client.db('test', username='root', password='passwd')
+
+ # Get the API wrapper for graph "school".
+ school = db.graph('school')
-An **edge definition** specifies which vertex and edge collections are used in
-a particular graph.
+ # The "_id" field is required instead of "_key" field.
+ school.insert_vertex('teachers', {'_key': 'jon', 'name': 'Jon'})
+ school.update_vertex({'_id': 'teachers/jon', 'age': 35})
+    school.replace_vertex({'_id': 'teachers/jon', 'name': 'Jon', 'age': 36})
+ school.has_vertex('teachers/jon')
+ school.vertex('teachers/jon')
+ school.delete_vertex('teachers/jon')
+
+See :ref:`Graph` and :ref:`VertexCollection` for API specification.
.. _edge-collections:
-An **edge collection** consists of edge documents. It is uniquely identified
-by its name which must consist only of alphanumeric characters, hyphen and the
-underscore characters. Edge collections share their namespace with other types
-of collections.
+Edge Collections
+================
+
+An **edge collection** contains :ref:`edge documents <edge-documents>`, and
+shares its namespace with all other types of collections. You can manage edge
+documents via standard collection API wrappers, but using edge collection API
+wrappers provides additional safeguards:
-The documents in an edge collection are fully accessible from a standard
-collection. Managing documents through an edge collection, however, adds
-additional safeguards: all modifications are executed in transactions and
-edge documents are checked against the edge definitions on insert.
+* All modifications are executed in transactions.
+* Edge documents are checked against the edge definitions on insert.
-Here is an example showing how an edge definition can be created and used:
+**Example:**
-.. code-block:: python
+.. testcode::
from arango import ArangoClient
+ # Initialize the ArangoDB client.
client = ArangoClient()
- db = client.db('my_database')
- schedule = db.graph('schedule')
-
- # Create a couple of vertex collections
- schedule.create_vertex_collection('profs')
- schedule.create_vertex_collection('courses')
-
- # Create a new edge definition (and a new edge collection)
- schedule.create_edge_definition(
- name='teaches',
- from_collections=['profs'],
- to_collections=['courses']
- )
- # List existing edge definitions
- schedule.edge_definitions()
+ # Connect to "test" database as root user.
+ db = client.db('test', username='root', password='passwd')
+
+ # Get the API wrapper for graph "school".
+ school = db.graph('school')
+
+ # Get the API wrapper for edge collection "teach".
+ if school.has_edge_definition('teach'):
+ teach = school.edge_collection('teach')
+ else:
+ teach = school.create_edge_definition(
+ edge_collection='teach',
+ from_vertex_collections=['teachers'],
+ to_vertex_collections=['lectures']
+ )
+
+    # Edge collections have a similar interface to standard collections.
+ teach.insert({
+ '_key': 'jon-CSC101',
+ '_from': 'teachers/jon',
+ '_to': 'lectures/CSC101'
+ })
+ teach.replace({
+ '_key': 'jon-CSC101',
+ '_from': 'teachers/jon',
+ '_to': 'lectures/CSC101',
+ 'online': False
+ })
+ teach.update({
+ '_key': 'jon-CSC101',
+ 'online': True
+ })
+ teach.has('jon-CSC101')
+ teach.get('jon-CSC101')
+ teach.delete('jon-CSC101')
+
+ # Create an edge between two vertices (essentially the same as insert).
+ teach.link('teachers/jon', 'lectures/CSC101', data={'online': False})
- # Retrieve an existing edge collection
- teaches = schedule.edge_collection('teaches')
+ # List edges going in/out of a vertex.
+ teach.edges('teachers/jon', direction='in')
+ teach.edges('teachers/jon', direction='out')
- # Edge collections have a similar interface to standard collections
- teaches.insert({
- '_key': 'michelle-CSC101',
- '_from': 'profs/michelle',
- '_to': 'courses/CSC101'
- })
- print(teaches.get('michelle-CSC101'))
+You can also manage edges via graph API wrappers, but you must use document
+IDs instead of keys where applicable.
+
+**Example:**
+
+.. testcode::
+
+ from arango import ArangoClient
+
+ # Initialize the ArangoDB client.
+ client = ArangoClient()
- # Delete an existing edge definition (and the collection)
- schedule.delete_edge_definition('teaches', purge=True)
+ # Connect to "test" database as root user.
+ db = client.db('test', username='root', password='passwd')
-Refer to :ref:`Graph` and :ref:`EdgeCollection` classes for more details.
+ # Get the API wrapper for graph "school".
+ school = db.graph('school')
+
+ # The "_id" field is required instead of "_key" field.
+ school.insert_edge(
+ collection='teach',
+ edge={
+ '_id': 'teach/jon-CSC101',
+ '_from': 'teachers/jon',
+ '_to': 'lectures/CSC101'
+ }
+ )
+ school.replace_edge({
+ '_id': 'teach/jon-CSC101',
+ '_from': 'teachers/jon',
+ '_to': 'lectures/CSC101',
+ 'online': False,
+ })
+ school.update_edge({
+ '_id': 'teach/jon-CSC101',
+ 'online': True
+ })
+ school.has_edge('teach/jon-CSC101')
+ school.edge('teach/jon-CSC101')
+ school.delete_edge('teach/jon-CSC101')
+ school.link('teach', 'teachers/jon', 'lectures/CSC101')
+ school.edges('teach', 'teachers/jon', direction='out')
+
+See :ref:`Graph` and :ref:`EdgeCollection` for API specification.
.. _graph-traversals:
@@ -145,46 +294,49 @@ Graph Traversals
================
**Graph traversals** are executed via the :func:`arango.graph.Graph.traverse`
-method. A traversal can span across multiple vertex collections and walk over
-the documents in a variety of ways.
+method. Each traversal can span across multiple vertex collections, and walk
+over edges and vertices using various algorithms.
-Here is an example of a graph traversal:
+**Example:**
-.. code-block:: python
+.. testcode::
from arango import ArangoClient
+ # Initialize the ArangoDB client.
client = ArangoClient()
- db = client.db('my_database')
-
- # Define a new graph
- schedule = db.create_graph('schedule')
- profs = schedule.create_vertex_collection('profs')
- courses = schedule.create_vertex_collection('courses')
- teaches = schedule.create_edge_definition(
- name='teaches',
- from_collections=['profs'],
- to_collections=['courses']
- )
- # Insert vertices into the graph
- profs.insert({'_key': 'michelle', 'name': 'Professor Michelle'})
- courses.insert({'_key': 'CSC101', 'name': 'Introduction to CS'})
- courses.insert({'_key': 'MAT223', 'name': 'Linear Algebra'})
- courses.insert({'_key': 'STA201', 'name': 'Statistics'})
-
- # Insert edges into the graph
- teaches.insert({'_from': 'profs/michelle', '_to': 'courses/CSC101'})
- teaches.insert({'_from': 'profs/michelle', '_to': 'courses/STA201'})
- teaches.insert({'_from': 'profs/michelle', '_to': 'courses/MAT223'})
-
- # Traverse the graph in outbound direction, breath-first
- traversal_results = schedule.traverse(
- start_vertex='profs/michelle',
+
+ # Connect to "test" database as root user.
+ db = client.db('test', username='root', password='passwd')
+
+ # Get the API wrapper for graph "school".
+ school = db.graph('school')
+
+ # Get API wrappers for "from" and "to" vertex collections.
+ teachers = school.vertex_collection('teachers')
+ lectures = school.vertex_collection('lectures')
+
+    # Get the API wrapper for "teach" edge collection.
+ teach = school.edge_collection('teach')
+
+ # Insert vertices into the graph.
+    teachers.insert({'_key': 'jon', 'name': 'Professor Jon'})
+ lectures.insert({'_key': 'CSC101', 'name': 'Introduction to CS'})
+ lectures.insert({'_key': 'MAT223', 'name': 'Linear Algebra'})
+ lectures.insert({'_key': 'STA201', 'name': 'Statistics'})
+
+ # Insert edges into the graph.
+ teach.insert({'_from': 'teachers/jon', '_to': 'lectures/CSC101'})
+ teach.insert({'_from': 'teachers/jon', '_to': 'lectures/STA201'})
+ teach.insert({'_from': 'teachers/jon', '_to': 'lectures/MAT223'})
+
+    # Traverse the graph in outbound direction, breadth-first.
+ school.traverse(
+ start_vertex='teachers/jon',
direction='outbound',
strategy='bfs',
edge_uniqueness='global',
vertex_uniqueness='global',
)
- print(traversal_results)
-Refer to :ref:`Graph` class for more details.
+See :func:`arango.graph.Graph.traverse` for API specification.
diff --git a/docs/http.rst b/docs/http.rst
index 74a8fa98..98ab0bdf 100644
--- a/docs/http.rst
+++ b/docs/http.rst
@@ -1,159 +1,157 @@
-.. _http-client-page:
+Using Custom HTTP Clients
+-------------------------
-Using Your Own HTTP Clients
----------------------------
+Python-arango lets you use your own HTTP clients for sending API requests to
+the ArangoDB server. The default implementation uses the Requests_ library.
-**Python-arango** allows you to use your own custom HTTP clients. Your client
-must inherit from :class:`arango.http_clients.base.BaseHTTPClient`, and all of
-its abstract methods must be implemented/overridden.
+Your HTTP client must inherit :class:`arango.http.HTTPClient` and implement its
+abstract method :func:`arango.http.HTTPClient.send_request`. The method must
+return valid (fully populated) instances of :class:`arango.response.Response`.
-The overridden methods must conform to their original method signatures and
-return instances of :class:`arango.response.Response`.
+For example, let's say you want to use your own HTTP client with:
-Let's go through a quick example. Let's say you want to use an HTTP client with
-built-in retries and SSL certificate verification disabled. You may want to
-define your ``MyCustomHTTPClient`` class as follows:
+* Automatic retries
+* Additional HTTP header called ``x-my-header``
+* SSL certificate verification disabled
+* Custom logging
-.. code-block:: python
+Your ``CustomHTTPClient`` class might look something like this:
- from __future__ import absolute_import, unicode_literals
+.. testcode::
+
+ import logging
from requests.adapters import HTTPAdapter
- from requests import Session, exceptions
+ from requests import Session
from arango.response import Response
- from arango.http_clients.base import BaseHTTPClient
+ from arango.http import HTTPClient
- class MyCustomHTTPClient(BaseHTTPClient):
+ class CustomHTTPClient(HTTPClient):
+ """My custom HTTP client with cool features."""
- def __init__(self, retries=5):
+ def __init__(self):
self._session = Session()
- self._session.mount('https://', HTTPAdapter(max_retries=retries))
- self._check_cert = False
- def head(self, url, params=None, headers=None, auth=None):
- res = self._session.head(
- url=url,
- params=params,
- headers=headers,
- auth=auth,
- verify=self._check_cert
- )
- return Response(
- url=url,
- method="head",
- headers=res.headers,
- http_code=res.status_code,
- http_text=res.reason,
- body=res.text
- )
+ # Initialize your logger.
+ self._logger = logging.getLogger('my_logger')
- def get(self, url, params=None, headers=None, auth=None):
- res = self._session.get(
- url=url,
- params=params,
- headers=headers,
- auth=auth,
- verify=self._check_cert
- )
- return Response(
- url=url,
- method="get",
- headers=res.headers,
- http_code=res.status_code,
- http_text=res.reason,
- body=res.text
- )
+ # Add your request headers.
+ self._session.headers.update({'x-my-header': 'true'})
- def put(self, url, data, params=None, headers=None, auth=None):
- res = self._session.put(
- url=url,
- data=data,
- params=params,
- headers=headers,
- auth=auth,
- verify=self._check_cert
- )
- return Response(
- url=url,
- method="put",
- headers=res.headers,
- http_code=res.status_code,
- http_text=res.reason,
- body=res.text
- )
+ # Enable retries.
+ adapter = HTTPAdapter(max_retries=5)
+ self._session.mount('https://', adapter)
- def post(self, url, data, params=None, headers=None, auth=None):
- res = self._session.post(
- url=url,
- data=data,
- params=params,
- headers=headers,
- auth=auth,
- verify=self._check_cert
- )
- return Response(
- url=url,
- method="post",
- headers=res.headers,
- http_code=res.status_code,
- http_text=res.reason,
- body=res.text
- )
+ def send_request(self,
+ method,
+ url,
+ params=None,
+ data=None,
+ headers=None,
+ auth=None):
+
+ # Add your own debug statement.
+ self._logger.debug('Sending request to {}'.format(url))
- def patch(self, url, data, params=None, headers=None, auth=None):
- res = self._session.patch(
+ # Send a request.
+ response = self._session.request(
+ method=method,
url=url,
- data=data,
params=params,
+ data=data,
headers=headers,
auth=auth,
- verify=self._check_cert
+ verify=False # Disable SSL verification
)
+ self._logger.debug('Got {}'.format(response.status_code))
+
+ # Return an instance of arango.response.Response per spec.
return Response(
- url=url,
- method="patch",
- headers=res.headers,
- http_code=res.status_code,
- http_text=res.reason,
- body=res.text
+ method=response.request.method,
+ url=response.url,
+ headers=response.headers,
+ status_code=response.status_code,
+ status_text=response.reason,
+ raw_body=response.text,
)
- def delete(self, url, data=None, params=None, headers=None, auth=None):
- res = self._session.delete(
+Then you would inject your client as follows:
+
+.. testsetup::
+
+ import logging
+
+ from requests.adapters import HTTPAdapter
+ from requests import Session
+
+ from arango.response import Response
+ from arango.http import HTTPClient
+
+ class CustomHTTPClient(HTTPClient):
+ """Custom HTTP client."""
+
+ def __init__(self):
+ self._session = Session()
+
+ # Initialize logger.
+ self._logger = logging.getLogger('my_logger')
+
+ # Add request headers.
+ self._session.headers.update({'x-my-header': 'true'})
+
+ # Add retries.
+ adapter = HTTPAdapter(max_retries=5)
+ self._session.mount('https://', adapter)
+
+ def send_request(self,
+ method,
+ url,
+ params=None,
+ data=None,
+ headers=None,
+ auth=None):
+ # Add your own debug statement.
+ self._logger.debug('Sending request to {}'.format(url))
+
+ # Send a request without SSL verification.
+ response = self._session.request(
+ method=method,
url=url,
- data=data,
params=params,
+ data=data,
headers=headers,
auth=auth,
- verify=self._check_cert
+ verify=False # No SSL verification
)
+ self._logger.debug('Got {}'.format(response.status_code))
+
+ # You must return an instance of arango.response.Response.
return Response(
- url=url,
- method="delete",
- headers=res.headers,
- http_code=res.status_code,
- http_text=res.reason,
- body=res.text
+ method=response.request.method,
+ url=response.url,
+ headers=response.headers,
+ status_code=response.status_code,
+ status_text=response.reason,
+ raw_body=response.text,
)
-
-Then you would inject your HTTP client as shown below:
-
-.. code-block:: python
-
- from my_module import MyCustomHTTPClient
+.. testcode::
from arango import ArangoClient
+ # from my_module import CustomHTTPClient
+
client = ArangoClient(
- username='root',
- password='',
- http_client=MyCustomHTTPClient(retries=10),
- use_session=True, # This flag (used in the default client) is now ignored
- check_cert=True # This flag (used in the default client) is now ignored
+ protocol='http',
+ host='localhost',
+ port=8529,
+ http_client=CustomHTTPClient()
)
-Refer to the default HTTP client used by **python-arango** itself for another example
-`here `__.
\ No newline at end of file
+For more information on how to use a ``requests.Session``, refer to
+`Requests documentation`_.
+
+.. _Requests: https://github.com/requests/requests
+.. _Requests documentation: http://docs.python-requests.org/en/master/user/advanced/#session-objects
\ No newline at end of file
diff --git a/docs/index.rst b/docs/index.rst
index 7d1f972d..395f99c6 100644
--- a/docs/index.rst
+++ b/docs/index.rst
@@ -1,29 +1,24 @@
-.. python-arango documentation master file, created by
- sphinx-quickstart on Sun Jul 24 17:17:48 2016.
- You can adapt this file completely to your liking, but it should at least
- contain the root `toctree` directive.
-
.. image:: /static/logo.png
|
-Welcome to the documentation for **python-arango**, a Python driver for
-`ArangoDB `__.
+Welcome to the documentation for **python-arango**, a Python driver for ArangoDB_.
Features
========
-- Clean, Pythonic interface
+- Clean Pythonic interface
- Lightweight
- High ArangoDB REST API coverage
Compatibility
=============
-- Python versions 2.7.x, 3.4.x, 3.5.x and 3.6.x are supported
-- Latest version of python-arango (3.x) supports ArangoDB 3.x only
-- Older versions of python-arango support ArangoDB 1.x ~ 2.x only
+- Python versions 2.7, 3.4, 3.5 and 3.6 are supported
+- Python-arango 4.x supports ArangoDB 3.3+ (recommended)
+- Python-arango 3.x supports ArangoDB 3.0 ~ 3.2 only
+- Python-arango 2.x supports ArangoDB 1.x ~ 2.x only
Installation
============
@@ -44,6 +39,7 @@ To install the latest version directly from GitHub_:
You may need to use ``sudo`` depending on your environment.
+.. _ArangoDB: https://www.arangodb.com
.. _PyPi: https://pypi.python.org/pypi/python-arango
.. _GitHub: https://github.com/joowani/python-arango
@@ -54,7 +50,7 @@ Contents
.. toctree::
:maxdepth: 1
- client
+ overview
database
collection
document
@@ -70,8 +66,10 @@ Contents
task
wal
pregel
+ foxx
threading
errors
logging
http
- classes
+ contributing
+ specs
diff --git a/docs/indexes.rst b/docs/indexes.rst
index 6aeb1c38..d1a10c32 100644
--- a/docs/indexes.rst
+++ b/docs/indexes.rst
@@ -1,43 +1,49 @@
-.. _index-page:
-
Indexes
-------
**Indexes** can be added to collections to speed up document lookups. Every
-collection has a primary hash index on the ``"_key"`` field by default. This
-index cannot be deleted or modified. Every edge collection has additional edge
-index on fields ``"_from"`` and ``"_to"``.
+collection has a primary hash index on the ``_key`` field by default. This
+index cannot be deleted or modified. Every edge collection has an additional
+edge index on fields ``_from`` and ``_to``. For more information on indexes,
+refer to `ArangoDB manual`_.
+
+.. _ArangoDB manual: https://docs.arangodb.com
-Here is an example showing how indexes can be added or removed:
+**Example:**
-.. code-block:: python
+.. testcode::
from arango import ArangoClient
+ # Initialize the ArangoDB client.
client = ArangoClient()
- db = client.db('my_database')
+
+ # Connect to "test" database as root user.
+ db = client.db('test', username='root', password='passwd')
+
+ # Create a new collection named "cities".
cities = db.create_collection('cities')
- # List the indexes in the collection
+ # List the indexes in the collection.
cities.indexes()
- # Add a new hash index on fields 'continent' and 'country'
- cities.add_hash_index(fields=['continent', 'country'], unique=True)
+ # Add a new hash index on document fields "continent" and "country".
+ index = cities.add_hash_index(fields=['continent', 'country'], unique=True)
- # Add new fulltext indices on fields 'continent' and 'country'
- cities.add_fulltext_index(fields=['continent'])
- cities.add_fulltext_index(fields=['country'])
+ # Add new fulltext indexes on fields "continent" and "country".
+ index = cities.add_fulltext_index(fields=['continent'])
+ index = cities.add_fulltext_index(fields=['country'])
- # Add a new skiplist index on field 'population'
- cities.add_skiplist_index(fields=['population'], sparse=False)
+ # Add a new skiplist index on field 'population'.
+ index = cities.add_skiplist_index(fields=['population'], sparse=False)
- # Add a new geo-spatial index on field 'coordinates'
- cities.add_geo_index(fields=['coordinates'])
+ # Add a new geo-spatial index on field 'coordinates'.
+ index = cities.add_geo_index(fields=['coordinates'])
- # Add a new persistent index on fields 'currency'
- cities.add_persistent_index(fields=['currency'], unique=True, sparse=True)
+ # Add a new persistent index on fields 'currency'.
+ index = cities.add_persistent_index(fields=['currency'], sparse=True)
- # Delete an existing index from the collection
- cities.delete_index('some_index_id')
+ # Delete the last index from the collection.
+ cities.delete_index(index['id'])
-Refer to :ref:`Collection` class for more details.
+See :ref:`StandardCollection` for API specification.
diff --git a/docs/logging.rst b/docs/logging.rst
index 15671fc3..79d6724a 100644
--- a/docs/logging.rst
+++ b/docs/logging.rst
@@ -1,92 +1,21 @@
-.. _logging-page:
-
Logging
-------
-By default, :class:`arango.client.ArangoClient` records API call history using
-the ``arango`` logger at ``logging.DEBUG`` level.
-
-Here is an example showing how the logger can be enabled and customized:
-
-.. code-block:: python
-
- import logging
-
- from arango import ArangoClient
+In order to see full HTTP request and response details, you can modify the
+logger settings for the Requests_ library, which python-arango uses under the
+hood:
- logger = logging.getLogger('arango')
-
- # Set the logging level
- logger.setLevel(logging.DEBUG)
-
- # Attach a handler
- handler = logging.StreamHandler()
- formatter = logging.Formatter('[%(levelname)s] %(message)s')
- handler.setFormatter(formatter)
- logger.addHandler(handler)
-
- # Initialize and use the client to see the changes
- client = ArangoClient(
- username='root',
- password='',
- enable_logging=True
- )
- client.databases()
- client.endpoints()
- client.log_levels()
-
-Alternatively, a custom logger can be created from scratch and injected:
-
-.. code-block:: python
-
- import logging
-
- from arango import ArangoClient
-
- # Create a custom logger
- logger = logging.getLogger('my_custom_logger')
-
- # Set the logging level
- logger.setLevel(logging.DEBUG)
-
- # Attach a handler
- handler = logging.StreamHandler()
- formatter = logging.Formatter('[%(levelname)s] %(message)s')
- handler.setFormatter(formatter)
- logger.addHandler(handler)
-
- # Initialize and use the client to see the changes
- client = ArangoClient(
- username='root',
- password='',
- enable_logging=True,
- logger=logger # Inject your own logger here
- )
- client.databases()
- client.endpoints()
- client.log_levels()
-
-
-The logging output for above would look something like this:
-
-.. code-block:: bash
-
- [DEBUG] GET http://127.0.0.1:8529/_db/_system/_api/database 200
- [DEBUG] GET http://127.0.0.1:8529/_db/_system/_api/endpoint 200
- [DEBUG] GET http://127.0.0.1:8529/_db/_system/_admin/log/level 200
-
-
-In order to see the full request information, turn on logging for the requests_
-library which **python-arango** uses under the hood:
+.. _Requests: https://github.com/requests/requests
.. code-block:: python
import requests
import logging
- try: # for Python 3
+ try:
+ # For Python 3
from http.client import HTTPConnection
except ImportError:
+ # For Python 2
from httplib import HTTPConnection
HTTPConnection.debuglevel = 1
@@ -96,8 +25,9 @@ library which **python-arango** uses under the hood:
requests_log.setLevel(logging.DEBUG)
requests_log.propagate = True
+.. note::
+ If python-arango's default HTTP client is overridden with a custom one,
+ the code snippet above may not work as expected.
-Note that if **python-arango**'s default HTTP client, which uses requests_, is
-overridden with a custom one, the example above may not work.
-
-.. _requests: https://github.com/requests/requests
\ No newline at end of file
+Alternatively, if you want to use your own loggers, see :doc:`http` for an
+example.
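+
+As a minimal starting point (one possible approach, not specific to
+python-arango), you can enable debug output globally:
+
+.. code-block:: python
+
+    import logging
+
+    # Print all DEBUG-level records, including those emitted by Requests,
+    # to standard error.
+    logging.basicConfig(level=logging.DEBUG)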
diff --git a/docs/make.bat b/docs/make.bat
new file mode 100644
index 00000000..fe164beb
--- /dev/null
+++ b/docs/make.bat
@@ -0,0 +1,36 @@
+@ECHO OFF
+
+pushd %~dp0
+
+REM Command file for Sphinx documentation
+
+if "%SPHINXBUILD%" == "" (
+ set SPHINXBUILD=python -msphinx
+)
+set SOURCEDIR=.
+set BUILDDIR=_build
+set SPHINXPROJ=python-arango
+
+if "%1" == "" goto help
+
+%SPHINXBUILD% >NUL 2>NUL
+if errorlevel 9009 (
+ echo.
+ echo.The Sphinx module was not found. Make sure you have Sphinx installed,
+ echo.then set the SPHINXBUILD environment variable to point to the full
+ echo.path of the 'sphinx-build' executable. Alternatively you may add the
+ echo.Sphinx directory to PATH.
+ echo.
+ echo.If you don't have Sphinx installed, grab it from
+ echo.http://sphinx-doc.org/
+ exit /b 1
+)
+
+%SPHINXBUILD% -M %1 %SOURCEDIR% %BUILDDIR% %SPHINXOPTS%
+goto end
+
+:help
+%SPHINXBUILD% -M help %SOURCEDIR% %BUILDDIR% %SPHINXOPTS%
+
+:end
+popd
diff --git a/docs/overview.rst b/docs/overview.rst
new file mode 100644
index 00000000..08cb7e04
--- /dev/null
+++ b/docs/overview.rst
@@ -0,0 +1,49 @@
+Getting Started
+---------------
+
+Examples often speak louder than words, so this documentation relies heavily
+on sample code snippets with comments. Here is the first example showing how
+the **python-arango** client can be initialized and used:
+
+.. testcode::
+
+ from arango import ArangoClient
+
+ # Initialize the ArangoDB client.
+ client = ArangoClient(protocol='http', host='localhost', port=8529)
+
+ # Connect to "_system" database as root user.
+ # This returns an API wrapper for "_system" database.
+ sys_db = client.db('_system', username='root', password='passwd')
+
+ # Create a new database named "test" if it does not exist.
+ if not sys_db.has_database('test'):
+ sys_db.create_database('test')
+
+ # Connect to "test" database as root user.
+ # This returns an API wrapper for "test" database.
+ db = client.db('test', username='root', password='passwd')
+
+ # Create a new collection named "students" if it does not exist.
+ # This returns an API wrapper for "students" collection.
+ if db.has_collection('students'):
+ students = db.collection('students')
+ else:
+ students = db.create_collection('students')
+
+ # Add a hash index to the collection.
+ students.add_hash_index(fields=['name'], unique=False)
+
+ # Truncate the collection.
+ students.truncate()
+
+ # Insert new documents into the collection.
+ students.insert({'name': 'jane', 'age': 19})
+ students.insert({'name': 'josh', 'age': 18})
+ students.insert({'name': 'jake', 'age': 21})
+
+ # Execute an AQL query. This returns a result cursor.
+ cursor = db.aql.execute('FOR doc IN students RETURN doc')
+
+ # Iterate through the cursor to retrieve the documents.
+ student_names = [document['name'] for document in cursor]
diff --git a/docs/pregel.rst b/docs/pregel.rst
index 9aa94f69..15da7742 100644
--- a/docs/pregel.rst
+++ b/docs/pregel.rst
@@ -1,38 +1,42 @@
-.. _pregel-page:
-
Pregel
------
-**Python-arango** provides APIs for distributed iterative graph processing
-(Pregel). For more information, please refer to the ArangoDB manual
-`here `__.
+Python-arango supports **Pregel**, an ArangoDB module for distributed iterative
+graph processing. For more information, refer to `ArangoDB manual`_.
+
+.. _ArangoDB manual: https://docs.arangodb.com
-Here is an example showing how Pregel jobs can be started, fetched or cancelled:
+**Example:**
-.. code-block:: python
+.. testcode::
from arango import ArangoClient
+ # Initialize the ArangoDB client.
client = ArangoClient()
- db = client.db('my_database')
- db.create_graph('my_graph')
-
- # Create and start a new Pregel job
- job_id = db.create_pregel_job(algorithm='pagerank', graph='my_graph')
-
- # Get the details of a Pregel job by its ID
- job = db.pregel_job(job_id)
- print(job['aggregators'])
- print(job['edge_count'])
- print(job['gss'])
- print(job['received_count'])
- print(job['send_count'])
- print(job['state'])
- print(job['total_runtime'])
- print(job['vertex_count'])
-
- # Delete/cancel a Pregel job by its ID
- db.delete_pregel_job(job_id)
-
-Refer to class :class:`arango.database.Database` for more details on the methods
-for Pregel jobs.
+
+ # Connect to "test" database as root user.
+ db = client.db('test', username='root', password='passwd')
+
+ # Get the Pregel API wrapper.
+ pregel = db.pregel
+
+ # Start a new Pregel job in "school" graph.
+ job_id = db.pregel.create_job(
+ graph='school',
+ algorithm='pagerank',
+ store=False,
+ max_gss=100,
+ thread_count=1,
+ async_mode=False,
+ result_field='result',
+ algorithm_params={'threshold': 0.000001}
+ )
+
+ # Retrieve details of a Pregel job by ID.
+ job = pregel.job(job_id)
+
+ # Delete a Pregel job by ID.
+ pregel.delete_job(job_id)
+
+See :ref:`Pregel` for API specification.
diff --git a/docs/specs.rst b/docs/specs.rst
new file mode 100644
index 00000000..e2499697
--- /dev/null
+++ b/docs/specs.rst
@@ -0,0 +1,186 @@
+API Specification
+-----------------
+
+This page contains the specification for all classes and methods available in
+python-arango.
+
+.. _ArangoClient:
+
+ArangoClient
+============
+
+.. autoclass:: arango.client.ArangoClient
+ :members:
+
+.. _AsyncDatabase:
+
+AsyncDatabase
+=============
+
+.. autoclass:: arango.database.AsyncDatabase
+ :inherited-members:
+ :members:
+
+.. _AsyncJob:
+
+AsyncJob
+========
+
+.. autoclass:: arango.job.AsyncJob
+ :members:
+
+.. _AQL:
+
+AQL
+====
+
+.. autoclass:: arango.aql.AQL
+ :members:
+
+.. _AQLQueryCache:
+
+AQLQueryCache
+=============
+
+.. autoclass:: arango.aql.AQLQueryCache
+ :members:
+
+.. _BatchDatabase:
+
+BatchDatabase
+=============
+
+.. autoclass:: arango.database.BatchDatabase
+ :inherited-members:
+ :members:
+
+.. _BatchJob:
+
+BatchJob
+========
+
+.. autoclass:: arango.job.BatchJob
+ :members:
+
+.. _Cursor:
+
+Cursor
+======
+
+.. autoclass:: arango.cursor.Cursor
+ :members:
+
+.. _DefaultHTTPClient:
+
+DefaultHTTPClient
+=================
+
+.. autoclass:: arango.http.DefaultHTTPClient
+ :members:
+
+.. _StandardCollection:
+
+StandardCollection
+==================
+
+.. autoclass:: arango.collection.StandardCollection
+ :inherited-members:
+ :members:
+
+.. _StandardDatabase:
+
+StandardDatabase
+================
+
+.. autoclass:: arango.database.StandardDatabase
+ :inherited-members:
+ :members:
+
+.. _EdgeCollection:
+
+EdgeCollection
+==============
+
+.. autoclass:: arango.collection.EdgeCollection
+ :members:
+
+.. _Foxx:
+
+Foxx
+====
+
+.. autoclass:: arango.foxx.Foxx
+ :members:
+
+.. _Graph:
+
+Graph
+=====
+
+.. autoclass:: arango.graph.Graph
+ :members:
+
+.. _HTTPClient:
+
+HTTPClient
+==========
+
+.. autoclass:: arango.http.HTTPClient
+ :members:
+
+.. _Pregel:
+
+Pregel
+======
+
+.. autoclass:: arango.pregel.Pregel
+ :members:
+
+.. _Request:
+
+Request
+=======
+
+.. autoclass:: arango.request.Request
+ :members:
+
+.. _Response:
+
+Response
+========
+
+.. autoclass:: arango.response.Response
+ :members:
+
+.. _TransactionDatabase:
+
+TransactionDatabase
+===================
+
+.. autoclass:: arango.database.TransactionDatabase
+ :inherited-members:
+ :members:
+
+.. _TransactionJob:
+
+TransactionJob
+==============
+
+.. autoclass:: arango.job.TransactionJob
+ :members:
+
+.. _VertexCollection:
+
+VertexCollection
+================
+
+.. autoclass:: arango.collection.VertexCollection
+ :members:
+
+.. _WriteAheadLog:
+
+WAL
+====
+
+.. autoclass:: arango.wal.WAL
+ :members:
diff --git a/docs/task.rst b/docs/task.rst
index 298ce99b..8ebd2484 100644
--- a/docs/task.rst
+++ b/docs/task.rst
@@ -1,27 +1,26 @@
-.. _task-page:
+Tasks
+-----
-Task Management
----------------
+ArangoDB can schedule user-defined JavaScript snippets as one-time or periodic
+(re-scheduled after each execution) tasks. Tasks are executed in the context of
+the database they are defined in.
-ArangoDB can execute user-defined Javascript snippets as one-shot (runs only
-once without repeats) or periodic (re-scheduled after each execution) tasks.
-The tasks are executed in the context of the database they are defined in.
+**Example:**
-.. note::
- When deleting a database, any tasks that were initialized under its context
- remain active. It is therefore advisable to delete any running tasks before
- deleting the database.
-
-Example:
-
-.. code-block:: python
+.. testcode::
from arango import ArangoClient
+ # Initialize the ArangoDB client.
client = ArangoClient()
- db = client.db('my_database')
- # Create a new task
+ # Connect to "test" database as root user.
+ db = client.db('test', username='root', password='passwd')
+
+    # List all active tasks.
+ db.tasks()
+
+ # Create a new task which simply prints parameters.
db.create_task(
name='test_task',
command='''
@@ -37,13 +36,15 @@ Example:
task_id='001'
)
- # List all active tasks
- db.tasks()
-
- # Retrieve information on a task by ID
+ # Retrieve details on a task by ID.
db.task('001')
- # Delete an existing task
- db.delete_task('001', ignore_missing=False)
+ # Delete an existing task by ID.
+ db.delete_task('001', ignore_missing=True)
+
+.. note::
+ When deleting a database, any tasks that were initialized under its context
+ remain active. It is therefore advisable to delete any running tasks before
+ deleting the database.
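+
+For example, a minimal cleanup sketch (reusing the ``db`` wrapper from above,
+and not part of the doctest suite) could look like this:
+
+.. code-block:: python
+
+    # Remove every remaining task before dropping the database itself.
+    for task in db.tasks():
+        db.delete_task(task['id'], ignore_missing=True)
+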
-Refer to :ref:`Database` class for more details.
+Refer to :ref:`StandardDatabase` class for API specification.
diff --git a/docs/threading.rst b/docs/threading.rst
index a7038d1c..43c18ac0 100644
--- a/docs/threading.rst
+++ b/docs/threading.rst
@@ -1,24 +1,13 @@
-.. _multithreading-page:
+Threading
+---------
-Multithreading
---------------
+Instances of the following classes are considered *stateful*, and should not be
+shared across multiple threads without locks in place:
+
+* :ref:`BatchDatabase` (see :doc:`batch`)
+* :ref:`BatchJob` (see :doc:`batch`)
+* :ref:`Cursor` (see :doc:`cursor`)
+* :ref:`TransactionDatabase` (see :doc:`transaction`)
+* :ref:`TransactionJob` (see :doc:`transaction`)
+
-Notes on Eventlet
-=================
-
-**Python-arango** should be compatible with eventlet_ *for the most part*.
-By default, **python-arango** makes API calls to ArangoDB using the requests_
-library which can be monkeypatched:
-
-.. code-block:: python
-
- import eventlet
- requests = eventlet.import_patched("requests")
-
-.. _requests: https://github.com/requests/requests
-.. _eventlet: http://eventlet.net
-
-Assuming the requests library is used and monkeypatched properly, all
-python-arango APIs except :ref:`Batch Execution <batch-page>` and
-:ref:`Async Execution <async-page>` should be thread-safe.
+The rest of python-arango is safe to use in multi-threaded environments.
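+
+For example, here is a minimal sketch (not from the test suite) of sharing a
+:ref:`Cursor` across threads by serializing access with a lock. It assumes the
+"test" database and a populated "students" collection from earlier examples:
+
+.. code-block:: python
+
+    import threading
+
+    from arango import ArangoClient
+
+    client = ArangoClient()
+    db = client.db('test', username='root', password='passwd')
+
+    # Cursor objects are stateful, so all access is funneled through one lock.
+    cursor = db.aql.execute('FOR doc IN students RETURN doc', batch_size=10)
+    lock = threading.Lock()
+
+    def consume():
+        while True:
+            with lock:
+                try:
+                    doc = cursor.next()
+                except StopIteration:
+                    return
+            print(doc['_key'])
+
+    threads = [threading.Thread(target=consume) for _ in range(4)]
+    for thread in threads:
+        thread.start()
+    for thread in threads:
+        thread.join()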
diff --git a/docs/transaction.rst b/docs/transaction.rst
index b0a3f16a..debc3422 100644
--- a/docs/transaction.rst
+++ b/docs/transaction.rst
@@ -1,70 +1,326 @@
-.. _transaction-page:
-
Transactions
------------
-Python-arango provides partial support for **transactions**, where incoming
-requests are queued in client-side memory and executed as a single, logical
-unit of work (ACID compliant). Due to the limitations of ArangoDB's REST API,
-:ref:`Transaction` currently supports only writes, unless raw Javascript is
-executed (see example below).
+Python-arango supports **transactions**, where requests to the ArangoDB server
+are placed in a client-side in-memory queue and committed as a single, logical
+unit of work (ACID compliant). After a successful commit, results can be
+retrieved from :ref:`TransactionJob` objects.
+
+**Example:**
+
+.. testcode::
+
+ from arango import ArangoClient
+
+ # Initialize the ArangoDB client.
+ client = ArangoClient()
+
+ # Connect to "test" database as root user.
+ db = client.db('test', username='root', password='passwd')
+
+ # Get the API wrapper for "students" collection.
+ students = db.collection('students')
+
+ # Begin a transaction via context manager. This returns an instance of
+ # TransactionDatabase, a database-level API wrapper tailored specifically
+ # for executing transactions. The transaction is automatically committed
+    # when exiting the context. The TransactionDatabase wrapper cannot be
+    # reused after commit and may be discarded afterwards.
+ with db.begin_transaction() as txn_db:
+
+ # Child wrappers are also tailored for transactions.
+ txn_col = txn_db.collection('students')
+
+ # API execution context is always set to "transaction".
+ assert txn_db.context == 'transaction'
+ assert txn_col.context == 'transaction'
+
+ # TransactionJob objects are returned instead of results.
+ job1 = txn_col.insert({'_key': 'Abby'})
+ job2 = txn_col.insert({'_key': 'John'})
+ job3 = txn_col.insert({'_key': 'Mary'})
+
+ # Upon exiting context, transaction is automatically committed.
+ assert 'Abby' in students
+ assert 'John' in students
+ assert 'Mary' in students
+
+ # Retrieve the status of each transaction job.
+ for job in txn_db.queued_jobs():
+ # Status is set to either "pending" (transaction is not committed yet
+ # and result is not available) or "done" (transaction is committed and
+ # result is available).
+ assert job.status() in {'pending', 'done'}
+
+ # Retrieve the job results.
+ metadata = job1.result()
+ assert metadata['_id'] == 'students/Abby'
+
+ metadata = job2.result()
+ assert metadata['_id'] == 'students/John'
+
+ metadata = job3.result()
+ assert metadata['_id'] == 'students/Mary'
+
+ # Transactions can be initiated without using a context manager.
+    # If the return_result parameter is set to False, no jobs are returned.
+ txn_db = db.begin_transaction(return_result=False)
+ txn_db.collection('students').insert({'_key': 'Jake'})
+ txn_db.collection('students').insert({'_key': 'Jill'})
+
+ # The commit must be called explicitly.
+ txn_db.commit()
+ assert 'Jake' in students
+ assert 'Jill' in students
.. note::
- The user should be mindful of the client-side memory while executing
- transactions with a large number of requests.
+ * Be mindful of client-side memory capacity when issuing a large number of
+ requests in a single transaction.
+ * :ref:`TransactionDatabase` and :ref:`TransactionJob` instances are
+ stateful objects, and should not be shared across multiple threads.
+    * A :ref:`TransactionDatabase` instance cannot be reused after commit.
-.. warning::
- :ref:`Transaction` is still experimental and prone to API changes.
+See :ref:`TransactionDatabase` and :ref:`TransactionJob` for API specification.
-Here is an example showing how transactions can be executed:
+Error Handling
+==============
-.. code-block:: python
+Unlike :doc:`batch <batch>` or :doc:`async <async>` execution, job-specific
+error handling is not possible for transactions. As soon as a job fails, the
+entire transaction is halted, all previous successful jobs are rolled back,
+and :class:`arango.exceptions.TransactionExecuteError` is raised. The exception
+describes the first failed job, and all :ref:`TransactionJob` objects are left
+at "pending" status (they may be discarded).
- from arango import ArangoClient
+**Example:**
+.. testcode::
+
+ from arango import ArangoClient, TransactionExecuteError
+
+ # Initialize the ArangoDB client.
client = ArangoClient()
- db = client.db('my_database')
- # Initialize the Transaction object via a context manager
- with db.transaction(write='students') as txn:
+ # Connect to "test" database as root user.
+ db = client.db('test', username='root', password='passwd')
+
+ # Get the API wrapper for "students" collection.
+ students = db.collection('students')
+
+ # Begin a new transaction.
+ txn_db = db.begin_transaction()
+ txn_col = txn_db.collection('students')
+
+ job1 = txn_col.insert({'_key': 'Karl'}) # Is going to be rolled back.
+ job2 = txn_col.insert({'_key': 'Karl'}) # Fails due to duplicate key.
+ job3 = txn_col.insert({'_key': 'Josh'}) # Never executed on the server.
+
+ try:
+ txn_db.commit()
+ except TransactionExecuteError as err:
+ assert err.http_code == 409
+ assert err.error_code == 1210
+ assert err.message.endswith('conflicting key: Karl')
+
+ # All operations in the transaction are rolled back.
+ assert 'Karl' not in students
+ assert 'Josh' not in students
+
+    # All transaction jobs are left at "pending" status and may be discarded.
+ for job in txn_db.queued_jobs():
+ assert job.status() == 'pending'
+
+Restrictions
+============
+
+This section covers important restrictions that you must keep in mind before
+choosing to use transactions.
+
+:ref:`TransactionJob` results are available only *after* commit, and are not
+accessible during execution. If you need to implement logic that depends on
+intermediate, in-transaction values, you can instead call the method
+:func:`arango.database.Database.execute_transaction`, which takes a raw
+JavaScript command as its argument.
+
+**Example:**
- # Transaction has a similar interface as that of Database, but
- # no results are returned on method calls (only queued in memory).
- txn.collection('students').insert({'_key': 'Abby'})
- txn.collection('students').insert({'_key': 'John'})
- txn.collection('students').insert({'_key': 'Mary'})
+.. testcode::
- # Upon exiting context, the queued requests are committed
- assert 'Abby' in db.collection('students')
- assert 'John' in db.collection('students')
- assert 'Mary' in db.collection('students')
+ from arango import ArangoClient
+
+ # Initialize the ArangoDB client.
+ client = ArangoClient()
- # Transaction can also be initialized without a context manager
- txn = db.transaction(write='students')
- job5 = txn.collection('students').insert({'_key': 'Jake'})
- job6 = txn.collection('students').insert({'_key': 'Jill'})
- txn.commit() # In which case commit must be called explicitly
+ # Connect to "test" database as root user.
+ db = client.db('test', username='root', password='passwd')
- assert 'Jake' in db.collection('students')
- assert 'Jill' in db.collection('students')
+ # Get the API wrapper for "students" collection.
+ students = db.collection('students')
- # Raw javascript can also be executed (these are committed immediately)
- result = db.transaction(write='students').execute(
+    # Execute transaction in raw JavaScript.
+ result = db.execute_transaction(
command='''
function () {{
var db = require('internal').db;
db.students.save(params.student1);
- db.students.save(params.student2);
+ if (db.students.count() > 1) {
+ db.students.save(params.student2);
+ } else {
+ db.students.save(params.student3);
+ }
return true;
}}
''',
params={
- 'student1': {'_key': 'Katy'},
- 'student2': {'_key': 'Greg'}
- }
+ 'student1': {'_key': 'Lucy'},
+ 'student2': {'_key': 'Greg'},
+ 'student3': {'_key': 'Dona'}
+ },
+ read='students', # Specify the collections read.
+ write='students' # Specify the collections written.
)
- assert 'Katy' in db.collection('students')
- assert 'Greg' in db.collection('students')
assert result is True
+ assert 'Lucy' in students
+ assert 'Greg' in students
+ assert 'Dona' not in students
+
+Note that in the above example, :func:`arango.database.Database.execute_transaction`
+requires the names of the *read* and *write* collections, as python-arango has
+no way of reliably inferring which collections are used. This is also the case
+when executing AQL queries.
+
+**Example:**
+
+.. testcode::
+
+ from arango import ArangoClient
+
+ # Initialize the ArangoDB client.
+ client = ArangoClient()
+
+ # Connect to "test" database as root user.
+ db = client.db('test', username='root', password='passwd')
+
+ # Begin a new transaction via context manager.
+ with db.begin_transaction() as txn_db:
+ job = txn_db.aql.execute(
+ 'INSERT {_key: "Judy", age: @age} IN students RETURN true',
+ bind_vars={'age': 19},
+ # You must specify the "read" and "write" collections.
+ read_collections=[],
+ write_collections=['students']
+ )
+ cursor = job.result()
+ assert cursor.next() is True
+ assert db.collection('students').get('Judy')['age'] == 19
+
+Due to limitations of ArangoDB's REST API, only the following methods are
+supported in transactions:
+
+* :func:`arango.aql.AQL.execute`
+* :func:`arango.collection.StandardCollection.get`
+* :func:`arango.collection.StandardCollection.get_many`
+* :func:`arango.collection.StandardCollection.insert`
+* :func:`arango.collection.StandardCollection.insert_many`
+* :func:`arango.collection.StandardCollection.update`
+* :func:`arango.collection.StandardCollection.update_many`
+* :func:`arango.collection.StandardCollection.update_match`
+* :func:`arango.collection.StandardCollection.replace`
+* :func:`arango.collection.StandardCollection.replace_many`
+* :func:`arango.collection.StandardCollection.replace_match`
+* :func:`arango.collection.StandardCollection.delete`
+* :func:`arango.collection.StandardCollection.delete_many`
+* :func:`arango.collection.StandardCollection.delete_match`
+* :func:`arango.collection.StandardCollection.properties`
+* :func:`arango.collection.StandardCollection.statistics`
+* :func:`arango.collection.StandardCollection.revision`
+* :func:`arango.collection.StandardCollection.checksum`
+* :func:`arango.collection.StandardCollection.rotate`
+* :func:`arango.collection.StandardCollection.truncate`
+* :func:`arango.collection.StandardCollection.count`
+* :func:`arango.collection.StandardCollection.has`
+* :func:`arango.collection.StandardCollection.ids`
+* :func:`arango.collection.StandardCollection.keys`
+* :func:`arango.collection.StandardCollection.all`
+* :func:`arango.collection.StandardCollection.find`
+* :func:`arango.collection.StandardCollection.find_near`
+* :func:`arango.collection.StandardCollection.find_in_range`
+* :func:`arango.collection.StandardCollection.find_in_radius`
+* :func:`arango.collection.StandardCollection.find_in_box`
+* :func:`arango.collection.StandardCollection.find_by_text`
+* :func:`arango.collection.StandardCollection.random`
+* :func:`arango.collection.StandardCollection.indexes`
+* :func:`arango.collection.VertexCollection.get`
+* :func:`arango.collection.VertexCollection.insert`
+* :func:`arango.collection.VertexCollection.update`
+* :func:`arango.collection.VertexCollection.replace`
+* :func:`arango.collection.VertexCollection.delete`
+* :func:`arango.collection.EdgeCollection.get`
+* :func:`arango.collection.EdgeCollection.insert`
+* :func:`arango.collection.EdgeCollection.update`
+* :func:`arango.collection.EdgeCollection.replace`
+* :func:`arango.collection.EdgeCollection.delete`
+
+If an unsupported method is called, :class:`arango.exceptions.TransactionStateError`
+is raised.
+
+**Example:**
+
+.. testcode::
+
+ from arango import ArangoClient, TransactionStateError
+
+ # Initialize the ArangoDB client.
+ client = ArangoClient()
+
+ # Connect to "test" database as root user.
+ db = client.db('test', username='root', password='passwd')
+
+ # Begin a new transaction.
+ txn_db = db.begin_transaction()
+
+ # API method "databases()" is not supported and an exception is raised.
+ try:
+ txn_db.databases()
+ except TransactionStateError as err:
+ assert err.source == 'client'
+ assert err.message == 'action not allowed in transaction'
+
+When running queries in transactions, the :doc:`cursors <cursor>` are loaded
+with the entire result set right away, regardless of the parameters passed in
+when executing the query (e.g. ``batch_size``). Be mindful of client-side
+memory capacity when executing queries that can potentially return a large
+result set.
+
+**Example:**
+
+.. testcode::
+
+ # Initialize the ArangoDB client.
+ client = ArangoClient()
+
+ # Connect to "test" database as root user.
+ db = client.db('test', username='root', password='passwd')
+
+ # Get the total document count in "students" collection.
+ document_count = db.collection('students').count()
+
+ # Execute an AQL query normally (without using transactions).
+ cursor1 = db.aql.execute('FOR doc IN students RETURN doc', batch_size=1)
+
+ # Execute the same AQL query in a transaction.
+ with db.begin_transaction() as txn_db:
+ job = txn_db.aql.execute('FOR doc IN students RETURN doc', batch_size=1)
+ cursor2 = job.result()
+
+ # The first cursor acts as expected. Its current batch contains only 1 item
+ # and it still needs to fetch the rest of its result set from the server.
+ assert len(cursor1.batch()) == 1
+ assert cursor1.has_more() is True
-Refer to :ref:`Transaction` class for more details.
+ # The second cursor is pre-loaded with the entire result set, and does not
+    # require further communication with the ArangoDB server. Note that the
+    # value of parameter "batch_size" was ignored.
+ assert len(cursor2.batch()) == document_count
+ assert cursor2.has_more() is False
diff --git a/docs/user.rst b/docs/user.rst
index dfc6bffe..9ffd9344 100644
--- a/docs/user.rst
+++ b/docs/user.rst
@@ -1,123 +1,96 @@
-.. _user-page:
+Users and Permissions
+---------------------
-User and Access Management
---------------------------
+Python-arango provides operations for managing users and permissions. Most of
+these operations can only be performed by admin users via the ``_system``
+database.
-Python-arango provides operations for managing users and database/collection
-access.
+**Example:**
-Example:
-
-.. code-block:: python
+.. testcode::
from arango import ArangoClient
+ # Initialize the ArangoDB client.
client = ArangoClient()
- # List all users
- client.users()
+ # Connect to "_system" database as root user.
+ sys_db = client.db('_system', username='root', password='passwd')
+
+ # List all users.
+ sys_db.users()
- # Create a new user
- client.create_user(
+ # Create a new user.
+ sys_db.create_user(
username='johndoe@gmail.com',
password='first_password',
+ active=True,
extra={'team': 'backend', 'title': 'engineer'}
)
- # Update an existing user
- client.update_user(
+
+ # Check if a user exists.
+ sys_db.has_user('johndoe@gmail.com')
+
+ # Retrieve details of a user.
+ sys_db.user('johndoe@gmail.com')
+
+ # Update an existing user.
+ sys_db.update_user(
username='johndoe@gmail.com',
password='second_password',
- extra={'team': 'frontend'}
+ active=True,
+ extra={'team': 'frontend', 'title': 'engineer'}
)
- # Replace an existing user
- client.replace_user(
+
+ # Replace an existing user.
+ sys_db.replace_user(
username='johndoe@gmail.com',
password='third_password',
+ active=True,
extra={'team': 'frontend', 'title': 'architect'}
)
- # Grant database access to an existing user
- client.grant_user_access(
+
+ # Retrieve user permissions for all databases and collections.
+ sys_db.permissions('johndoe@gmail.com')
+
+ # Retrieve user permission for "test" database.
+ sys_db.permission(
username='johndoe@gmail.com',
- database='my_database'
+ database='test'
)
- # Get full database and collection access details of an existing user
- client.user_access('johndoe@gmail.com', full=True)
- # Revoke database access from an existing user
- client.revoke_user_access(
+ # Retrieve user permission for "students" collection in "test" database.
+ sys_db.permission(
username='johndoe@gmail.com',
- database='my_database'
+ database='test',
+ collection='students'
)
- # Delete an existing user
- client.delete_user(username='johndoe@gmail.com')
-
-
-Note that the methods of :class:`arango.client.ArangoClient` above can only
-be called by root user with access to ``_system`` database. Non-root users can
-call the equivalent methods of :class:`arango.database.Database` through a
-database they have access to instead. For example:
-
-.. code-block:: python
-
- from arango import ArangoClient
-
- client = ArangoClient()
- db = client.database(
- name='database-the-user-has-access-to',
- username='username',
- password='password'
+ # Update user permission for "test" database.
+ sys_db.update_permission(
+ username='johndoe@gmail.com',
+ permission='rw',
+ database='test'
)
- # List all users
- db.users()
-
- # Create a new user
- db.create_user(
+ # Update user permission for "students" collection in "test" database.
+ sys_db.update_permission(
username='johndoe@gmail.com',
- password='first_password',
- extra={'team': 'backend', 'title': 'engineer'}
+ permission='ro',
+ database='test',
+ collection='students'
)
- # Update an existing user
- db.update_user(
+
+ # Reset user permission for "test" database.
+ sys_db.reset_permission(
username='johndoe@gmail.com',
- password='second_password',
- extra={'team': 'frontend'}
+ database='test'
)
- # Replace an existing user
- db.replace_user(
+
+ # Reset user permission for "students" collection in "test" database.
+ sys_db.reset_permission(
username='johndoe@gmail.com',
- password='third_password',
- extra={'team': 'frontend', 'title': 'architect'}
+ database='test',
+ collection='students'
)
- # Grant database access to an existing user
- db.grant_user_access('johndoe@gmail.com')
-
- # Get database access details of an existing user
- db.user_access('johndoe@gmail.com')
-
- # Revoke database access from an existing user
- db.revoke_user_access('johndoe@gmail.com')
-
- # Delete an existing user
- client.delete_user(username='johndoe@gmail.com')
-
-Collection-specific user access management is also possible:
-
-.. code-block:: python
-
- col = db.collection('some-collection')
-
- # Grant collection access to an existing user
- col.grant_user_access('johndoe@gmail.com')
-
- # Get collection access details of an existing user
- col.user_access('johndoe@gmail.com')
-
- # Revoke collection access from an existing user
- col.revoke_user_access('johndoe@gmail.com')
-
-Refer to classes :class:`arango.client.ArangoClient`,
-:class:`arango.database.Database`, and :class:`arango.collections.Collection`
-classes for more details.
+See :ref:`StandardDatabase` for API specification.
\ No newline at end of file
diff --git a/docs/wal.rst b/docs/wal.rst
index 3f833da3..1d43e145 100644
--- a/docs/wal.rst
+++ b/docs/wal.rst
@@ -1,70 +1,43 @@
-.. _wal-page:
-
Write-Ahead Log
---------------
-A **write-ahead log (WAL)** is a sequence of append-only files which contain
-all write operations executed on ArangoDB server. It is typically used to
-perform data recovery after a server crash or synchronize slave databases with
-master databases in replicated environments. The WAL operations provided by
-python-arango require root privileges (i.e. access to the ``_system`` database).
-For more general information on ArangoDB's write-ahead logs visit this
-`page `_.
-
+**Write-Ahead Log (WAL)** is a set of append-only files recording all writes
+on the ArangoDB server. It is typically used to perform data recovery after a
+crash, or to synchronize slave databases with master databases in replicated
+environments. WAL operations can only be performed by admin users via the
+``_system`` database.
-Example:
+**Example:**
-.. code-block:: python
+.. testcode::
from arango import ArangoClient
+ # Initialize the ArangoDB client.
client = ArangoClient()
- wal = client.wal
-
- # Configure the properties of the WAL
- wal.configure(oversized_ops=True)
-
- # Retrieve the properties of the WAL
- wal.properties()
-
- # List currently running WAL transactions
- wal.transactions()
-
- # Flush the WAL with garbage collection
- wal.flush(garbage_collect=True)
+ # Connect to "_system" database as root user.
+ sys_db = client.db('_system', username='root', password='passwd')
+ # Get the WAL API wrapper.
+ wal = sys_db.wal
-Note that the methods of :attr:`arango.client.ArangoClient.wal` above can only
-be called by root user with access to ``_system`` database. Non-root users can
-call the methods of :attr:`arango.database.Database.wal` using a database they
-have access to instead. For example:
-
-.. code-block:: python
-
- from arango import ArangoClient
-
- client = ArangoClient()
- db = client.database(
- name='database-the-user-has-access-to',
- username='username',
- password='password'
+ # Configure WAL properties.
+ wal.configure(
+ historic_logs=15,
+ oversized_ops=False,
+ log_size=30000000,
+ reserve_logs=5,
+ throttle_limit=0,
+ throttle_wait=16000
)
- # The WAL object now knows of the user and the database
- wal = db.wal
-
- # Configure the properties of the WAL
- wal.configure(oversized_ops=True)
-
- # Retrieve the properties of the WAL
+ # Retrieve WAL properties.
wal.properties()
- # List currently running WAL transactions
+ # List WAL transactions.
wal.transactions()
- # Flush the WAL with garbage collection
+ # Flush WAL with garbage collection.
wal.flush(garbage_collect=True)
-
-Refer to :class:`arango.wal.WriteAheadLog` for more details on the methods.
+See :ref:`WriteAheadLog` for API specification.
diff --git a/scripts/setup_arangodb.sh b/scripts/setup_arangodb.sh
deleted file mode 100644
index 556ec37b..00000000
--- a/scripts/setup_arangodb.sh
+++ /dev/null
@@ -1,51 +0,0 @@
-#!/bin/bash
-
-DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )"
-cd $DIR
-
-VERSION=3.2.0
-NAME=ArangoDB-$VERSION
-
-if [ ! -d "$DIR/$NAME" ]; then
- # download ArangoDB
- echo "curl -L -o $NAME.tar.gz https://www.arangodb.org/repositories/travisCI/$NAME.tar.gz"
- curl -L -o $NAME.tar.gz https://www.arangodb.org/repositories/travisCI/$NAME.tar.gz
- echo "tar zxf $NAME.tar.gz"
- tar zvxf $NAME.tar.gz
-fi
-
-ARCH=$(arch)
-PID=$(echo $PPID)
-TMP_DIR="/tmp/arangodb.$PID"
-PID_FILE="/tmp/arangodb.$PID.pid"
-ARANGODB_DIR="$DIR/$NAME"
-ARANGOD="${ARANGODB_DIR}/bin/arangod_x86_64"
-
-# create database directory
-mkdir ${TMP_DIR}
-
-echo "Starting ArangoDB '${ARANGOD}'"
-
-${ARANGOD} \
- --database.directory ${TMP_DIR} \
- --configuration none \
- --server.endpoint tcp://127.0.0.1:8529 \
- --javascript.app-path ${ARANGODB_DIR}/js/apps \
- --javascript.startup-directory ${ARANGODB_DIR}/js \
- --database.maximal-journal-size 1048576 &
-
-sleep 2
-
-echo "Check for arangod process"
-process=$(ps auxww | grep "bin/arangod" | grep -v grep)
-
-if [ "x$process" == "x" ]; then
- echo "no 'arangod' process found"
- echo "ARCH = $ARCH"
- exit 1
-fi
-
-echo "Waiting until ArangoDB is ready on port 8529"
-sleep 10
-
-echo "ArangoDB is up"
diff --git a/setup.py b/setup.py
index e2c3c02f..42ef808d 100644
--- a/setup.py
+++ b/setup.py
@@ -1,20 +1,18 @@
from setuptools import setup, find_packages
-version = {}
-with open('./arango/version.py') as fp:
- exec(fp.read(), version)
+from arango import version
setup(
name='python-arango',
description='Python Driver for ArangoDB',
- version=version['VERSION'],
+ version=version.__version__,
author='Joohwan Oh',
author_email='joohwan.oh@outlook.com',
url='https://github.com/joowani/python-arango',
- packages=find_packages(),
+ packages=find_packages(exclude=['tests']),
include_package_data=True,
install_requires=['requests', 'six'],
- tests_require=['pytest'],
+ tests_require=['pytest', 'mock', 'flake8'],
license='MIT',
classifiers=[
'Intended Audience :: Developers',
diff --git a/tests/conftest.py b/tests/conftest.py
new file mode 100644
index 00000000..dc6c2384
--- /dev/null
+++ b/tests/conftest.py
@@ -0,0 +1,302 @@
+from __future__ import absolute_import, unicode_literals, division
+
+import pytest
+
+from arango import ArangoClient
+from arango.database import StandardDatabase
+from tests.helpers import (
+ generate_db_name,
+ generate_col_name,
+ generate_string,
+ generate_username,
+ generate_graph_name,
+)
+from tests.executors import (
+ TestAsyncExecutor,
+ TestBatchExecutor,
+ TestTransactionExecutor
+)
+
+print('Setting up test ArangoDB client ...')
+_client = ArangoClient()
+_sys_db = _client.db('_system', 'root', 'passwd')
+
+print('Setting up test databases ...')
+_db_name = generate_db_name()
+_username = generate_username()
+_password = generate_string()
+_db_users = [{
+ 'username': _username,
+ 'password': _password,
+ 'active': True
+}]
+_sys_db.create_database(_db_name, _db_users)
+_db = _client.db(_db_name, _username, _password)
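+
+# Wrapper for a database that was never actually created, using blank
+# credentials. It is used throughout the tests to exercise error-handling paths.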
+_bad_db_name = generate_db_name()
+_bad_db = _client.db(_bad_db_name, '', '')
+
+print('Setting up test collections ...')
+_col_name = generate_col_name()
+_col = _db.create_collection(_col_name)
+_skiplist_index = _col.add_skiplist_index(['val'])
+_fulltext_index = _col.add_fulltext_index(['text'])
+_geo_index = _col.add_geo_index(['loc'])
+_bad_col = _bad_db.collection(_col_name)
+_lecol_name = generate_col_name()
+_lecol = _db.create_collection(_lecol_name, edge=True)
+
+print('Setting up test graphs ...')
+_graph_name = generate_graph_name()
+_graph = _db.create_graph(_graph_name)
+_bad_graph = _bad_db.graph(_graph_name)
+
+print('Setting up test "_from" vertex collections ...')
+_fvcol_name = generate_col_name()
+_fvcol = _graph.create_vertex_collection(_fvcol_name)
+_bad_fvcol = _bad_graph.vertex_collection(_fvcol_name)
+
+print('Setting up test "_to" vertex collections ...')
+_tvcol_name = generate_col_name()
+_tvcol = _graph.create_vertex_collection(_tvcol_name)
+_bad_tvcol = _bad_graph.vertex_collection(_tvcol_name)
+
+print('Setting up test edge collection and definition ...')
+_ecol_name = generate_col_name()
+_ecol = _graph.create_edge_definition(
+ edge_collection=_ecol_name,
+ from_vertex_collections=[_fvcol_name],
+ to_vertex_collections=[_tvcol_name]
+)
+_bad_ecol = _bad_graph.edge_collection(_ecol_name)
+
+print('Setting up test documents ...')
+_docs = [
+ {'_key': '1', 'val': 1, 'text': 'foo', 'loc': [1, 1]},
+ {'_key': '2', 'val': 2, 'text': 'foo', 'loc': [2, 2]},
+ {'_key': '3', 'val': 3, 'text': 'foo', 'loc': [3, 3]},
+ {'_key': '4', 'val': 4, 'text': 'bar', 'loc': [4, 4]},
+ {'_key': '5', 'val': 5, 'text': 'bar', 'loc': [5, 5]},
+ {'_key': '6', 'val': 6, 'text': 'bar', 'loc': [5, 5]},
+]
+
+print('Setting up test "_from" vertex documents ...')
+_fvdocs = [
+ {'_key': '1', 'val': 1},
+ {'_key': '2', 'val': 2},
+ {'_key': '3', 'val': 3},
+]
+
+print('Setting up test "_to" vertex documents ...')
+_tvdocs = [
+ {'_key': '4', 'val': 4},
+ {'_key': '5', 'val': 5},
+ {'_key': '6', 'val': 6},
+]
+
+print('Setting up test edge documents ...')
+_edocs = [
+ {
+ '_key': '1',
+ '_from': '{}/1'.format(_fvcol_name),
+ '_to': '{}/4'.format(_tvcol_name)
+ },
+ {
+ '_key': '2',
+ '_from': '{}/1'.format(_fvcol_name),
+ '_to': '{}/5'.format(_tvcol_name)
+ },
+ {
+ '_key': '3',
+ '_from': '{}/6'.format(_fvcol_name),
+ '_to': '{}/2'.format(_tvcol_name)
+ },
+ {
+ '_key': '4',
+ '_from': '{}/8'.format(_fvcol_name),
+ '_to': '{}/7'.format(_tvcol_name)
+ },
+]
+
+
+@pytest.fixture(autouse=False)
+def client():
+ return _client
+
+
+@pytest.fixture(autouse=False)
+def url():
+ return _client.base_url
+
+
+@pytest.fixture(autouse=False)
+def username():
+ return _username
+
+
+@pytest.fixture(autouse=False)
+def password():
+ return _password
+
+
+@pytest.fixture(autouse=False)
+def sys_db():
+ return _sys_db
+
+
+@pytest.fixture(autouse=False)
+def col(db):
+ collection = db.collection(_col_name)
+ collection.truncate()
+ return collection
+
+
+@pytest.fixture(autouse=False)
+def lecol(db):
+ collection = db.collection(_lecol_name)
+ collection.truncate()
+ return collection
+
+
+@pytest.fixture(autouse=False)
+def bad_col(bad_db):
+ return bad_db.collection(_col_name)
+
+
+@pytest.fixture(autouse=False)
+def graph(db):
+ return db.graph(_graph_name)
+
+
+@pytest.fixture(autouse=False)
+def bad_graph(bad_db):
+ return bad_db.graph(_graph_name)
+
+
+@pytest.fixture(autouse=False)
+def fvcol():
+ _fvcol.truncate()
+ return _fvcol
+
+
+@pytest.fixture(autouse=False)
+def bad_fvcol():
+ return _bad_fvcol
+
+
+@pytest.fixture(autouse=False)
+def tvcol():
+ _tvcol.truncate()
+ return _tvcol
+
+
+@pytest.fixture(autouse=False)
+def ecol():
+ return _ecol
+
+
+@pytest.fixture(autouse=False)
+def bad_ecol():
+ return _bad_ecol
+
+
+@pytest.fixture(autouse=False)
+def docs():
+ return [doc.copy() for doc in _docs]
+
+
+@pytest.fixture(autouse=False)
+def fvdocs():
+ return [doc.copy() for doc in _fvdocs]
+
+
+@pytest.fixture(autouse=False)
+def tvdocs():
+ return [doc.copy() for doc in _tvdocs]
+
+
+@pytest.fixture(autouse=False)
+def edocs():
+ return [doc.copy() for doc in _edocs]
+
+
+@pytest.fixture(autouse=False)
+def geo():
+ return _geo_index
+
+
+def pytest_addoption(parser):
+ parser.addoption("--complete", action="store_true")
+
+
+# noinspection PyProtectedMember
+def pytest_generate_tests(metafunc):
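+    # Parametrize the "db" and "bad_db" fixtures. By default tests run against
+    # the standard database only; with the --complete flag, transaction, async
+    # and batch databases (backed by the executors in tests/executors.py) are
+    # added for the test modules that support them.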
+ test_name = metafunc.module.__name__.split('.test_', 1)[-1]
+
+ dbs = [_db]
+ bad_dbs = [_bad_db]
+
+ if metafunc.config.getoption('complete'):
+
+ if test_name in {'collection', 'document', 'graph', 'aql', 'index'}:
+ # Add test transaction databases
+ tdb = StandardDatabase(_db._conn)
+ tdb._executor = TestTransactionExecutor(_db._conn)
+ tdb._is_transaction = True
+ dbs.append(tdb)
+
+ bad_tdb = StandardDatabase(_bad_db._conn)
+ bad_tdb._executor = TestTransactionExecutor(_bad_db._conn)
+ bad_dbs.append(bad_tdb)
+
+ if test_name not in {
+ 'async', 'batch', 'transaction', 'client', 'exception'
+ }:
+ # Add test async databases
+ adb = StandardDatabase(_db._conn)
+ adb._executor = TestAsyncExecutor(_db._conn)
+ dbs.append(adb)
+
+ bad_adb = StandardDatabase(_bad_db._conn)
+ bad_adb._executor = TestAsyncExecutor(_bad_db._conn)
+ bad_dbs.append(bad_adb)
+
+ # Add test batch databases
+ bdb = StandardDatabase(_db._conn)
+ bdb._executor = TestBatchExecutor(_db._conn)
+ dbs.append(bdb)
+
+ bad_bdb = StandardDatabase(_bad_db._conn)
+ bad_bdb._executor = TestBatchExecutor(_bad_db._conn)
+ bad_dbs.append(bad_bdb)
+
+ if 'db' in metafunc.fixturenames and 'bad_db' in metafunc.fixturenames:
+ metafunc.parametrize('db,bad_db', zip(dbs, bad_dbs))
+ elif 'db' in metafunc.fixturenames:
+ metafunc.parametrize('db', dbs)
+ elif 'bad_db' in metafunc.fixturenames:
+ metafunc.parametrize('bad_db', bad_dbs)
+
+
+def pytest_unconfigure(*_): # pragma: no cover
+ # Remove all test async jobs.
+ _sys_db.clear_async_jobs()
+
+ # Remove all test databases.
+ for database in _sys_db.databases():
+ if database.startswith('test_database'):
+ _sys_db.delete_database(database)
+
+    # Remove all test collections.
+    for collection in _sys_db.collections():
+        if collection['name'].startswith('test_collection'):
+            _sys_db.delete_collection(collection['name'])
+
+    # Remove all test tasks.
+ for task in _sys_db.tasks():
+ if task['name'].startswith('test_task'):
+ _sys_db.delete_task(task['id'], ignore_missing=True)
+
+    # Remove all test users.
+ for user in _sys_db.users():
+ if user['username'].startswith('test_user'):
+ _sys_db.delete_user(user['username'], ignore_missing=True)
diff --git a/tests/executors.py b/tests/executors.py
new file mode 100644
index 00000000..ef6d3bbc
--- /dev/null
+++ b/tests/executors.py
@@ -0,0 +1,67 @@
+import time
+
+from arango.executor import (
+ AsyncExecutor,
+ BatchExecutor,
+ TransactionExecutor
+)
+from arango.job import BatchJob, TransactionJob
+
+
+class TestAsyncExecutor(AsyncExecutor):
+
+ def __init__(self, connection):
+ super(TestAsyncExecutor, self).__init__(
+ connection=connection,
+ return_result=True
+ )
+
+ def execute(self, request, response_handler):
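+        # Submit the request, then poll until the async job finishes so that
+        # tests can treat async execution as if it were synchronous.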
+ job = AsyncExecutor.execute(self, request, response_handler)
+ while job.status() != 'done':
+ time.sleep(.01)
+ return job.result()
+
+
+class TestBatchExecutor(BatchExecutor):
+
+ def __init__(self, connection):
+ super(TestBatchExecutor, self).__init__(
+ connection=connection,
+ return_result=True
+ )
+
+ def execute(self, request, response_handler):
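+        # Commit each request immediately in its own single-job batch so the
+        # result is available to tests right away.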
+ self._committed = False
+ self._queue.clear()
+
+ job = BatchJob(response_handler)
+ self._queue[job.id] = (request, job)
+ self.commit()
+ return job.result()
+
+
+class TestTransactionExecutor(TransactionExecutor):
+
+ def __init__(self, connection):
+ super(TestTransactionExecutor, self).__init__(
+ connection=connection,
+ timeout=0,
+ sync=True,
+ return_result=True,
+ read=None,
+ write=None
+ )
+
+ def execute(self, request, response_handler):
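+        # Requests that carry no transaction command cannot be queued in a
+        # transaction, so they are sent to the server directly.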
+ if request.command is None:
+ response = self._conn.send_request(request)
+ return response_handler(response)
+ else:
+ self._committed = False
+ self._queue.clear()
+
+ job = TransactionJob(response_handler)
+ self._queue[job.id] = (request, job)
+ self.commit()
+ return job.result()
diff --git a/tests/helpers.py b/tests/helpers.py
new file mode 100644
index 00000000..64c22cf8
--- /dev/null
+++ b/tests/helpers.py
@@ -0,0 +1,140 @@
+from __future__ import absolute_import, unicode_literals
+
+from collections import deque
+from uuid import uuid4
+
+import pytest
+
+from arango.cursor import Cursor
+from arango.exceptions import (
+ AsyncExecuteError,
+ BatchExecuteError,
+ TransactionExecuteError
+)
+
+
+def generate_db_name():
+ """Generate and return a random database name.
+
+ :return: Random database name.
+ :rtype: str | unicode
+ """
+ return 'test_database_{}'.format(uuid4().hex)
+
+
+def generate_col_name():
+ """Generate and return a random collection name.
+
+ :return: Random collection name.
+ :rtype: str | unicode
+ """
+ return 'test_collection_{}'.format(uuid4().hex)
+
+
+def generate_graph_name():
+ """Generate and return a random graph name.
+
+ :return: Random graph name.
+ :rtype: str | unicode
+ """
+ return 'test_graph_{}'.format(uuid4().hex)
+
+
+def generate_doc_key():
+ """Generate and return a random document key.
+
+ :return: Random document key.
+ :rtype: str | unicode
+ """
+ return 'test_document_{}'.format(uuid4().hex)
+
+
+def generate_task_name():
+ """Generate and return a random task name.
+
+ :return: Random task name.
+ :rtype: str | unicode
+ """
+ return 'test_task_{}'.format(uuid4().hex)
+
+
+def generate_task_id():
+ """Generate and return a random task ID.
+
+ :return: Random task ID
+ :rtype: str | unicode
+ """
+ return 'test_task_id_{}'.format(uuid4().hex)
+
+
+def generate_username():
+ """Generate and return a random username.
+
+ :return: Random username.
+ :rtype: str | unicode
+ """
+ return 'test_user_{}'.format(uuid4().hex)
+
+
+def generate_string():
+ """Generate and return a random unique string.
+
+ :return: Random unique string.
+ :rtype: str | unicode
+ """
+ return uuid4().hex
+
+
+def generate_service_mount():
+ """Generate and return a random service name.
+
+ :return: Random service name.
+ :rtype: str | unicode
+ """
+ return '/test_{}'.format(uuid4().hex)
+
+
+def clean_doc(obj):
+ """Return the document(s) with all extra system keys stripped.
+
+    :param obj: Document(s) to clean.
+    :type obj: list | dict | arango.cursor.Cursor
+    :return: Document(s) with the system keys stripped.
+ :rtype: list | dict
+ """
+ if isinstance(obj, (Cursor, list, deque)):
+ docs = [clean_doc(d) for d in obj]
+ return sorted(docs, key=lambda doc: doc['_key'])
+
+ if isinstance(obj, dict):
+ return {
+ field: value for field, value in obj.items()
+ if field in {'_key', '_from', '_to'} or not field.startswith('_')
+ }
+
+
+def extract(key, items):
+ """Return the sorted values from dicts using the given key.
+
+    :param key: Dictionary key.
+    :type key: str | unicode
+    :param items: Items to filter.
+    :type items: [dict]
+    :return: Sorted list of values.
+ :rtype: [str | unicode]
+ """
+ return sorted(item[key] for item in items)
+
+
+def assert_raises(exception):
+ """Assert that the given exception is raised.
+
+ :param exception: Expected exception.
+    :type exception: Exception
+ """
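+    # Batch, async and transaction executions wrap failures in their own
+    # exception types, so those are accepted alongside the expected one.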
+ return pytest.raises((
+ exception,
+ AsyncExecuteError,
+ BatchExecuteError,
+ TransactionExecuteError
+ ))
diff --git a/tests/static/service.zip b/tests/static/service.zip
new file mode 100644
index 00000000..00bf513e
Binary files /dev/null and b/tests/static/service.zip differ
diff --git a/tests/test_aql.py b/tests/test_aql.py
index d3f13a3d..919a7ebf 100644
--- a/tests/test_aql.py
+++ b/tests/test_aql.py
@@ -1,177 +1,298 @@
from __future__ import absolute_import, unicode_literals
-import pytest
-
-from arango import ArangoClient
-from arango.aql import AQL
-from arango.exceptions import *
-
-from .utils import (
- generate_db_name,
- generate_col_name,
- generate_user_name
-)
-
-
-arango_client = ArangoClient()
-
-db_name = generate_db_name()
-db = arango_client.create_database(db_name)
-col_name = generate_col_name()
-db.create_collection(col_name)
-username = generate_user_name()
-user = arango_client.create_user(username, 'password')
-func_name = ''
-func_body = ''
-
-
-def teardown_module(*_):
- arango_client.delete_database(db_name, ignore_missing=True)
- arango_client.delete_user(username, ignore_missing=True)
-
-@pytest.mark.order1
-def test_init():
- assert isinstance(db.aql, AQL)
- assert 'ArangoDB AQL' in repr(db.aql)
-
-
-@pytest.mark.order2
-def test_query_explain():
- fields_to_check = [
+from arango.exceptions import (
+ AQLCacheClearError,
+ AQLCacheConfigureError,
+ AQLCachePropertiesError,
+ AQLFunctionCreateError,
+ AQLFunctionDeleteError,
+ AQLFunctionListError,
+ AQLQueryClearError,
+ AQLQueryExecuteError,
+ AQLQueryExplainError,
+ AQLQueryListError,
+ AQLQueryTrackingGetError,
+ AQLQueryTrackingSetError,
+ AQLQueryKillError,
+ AQLQueryValidateError,
+ CursorStateError)
+from tests.helpers import assert_raises, extract
+
+
+def test_aql_attributes(db, username):
+ assert db.context in ['default', 'async', 'batch', 'transaction']
+ assert db.username == username
+ assert db.db_name == db.name
+    assert repr(db.aql) == '<AQL in {}>'.format(db.name)
+    assert repr(db.aql.cache) == '<AQLQueryCache in {}>'.format(db.name)
+
+
+def test_aql_query_management(db, bad_db, col, docs):
+ plan_fields = [
'estimatedNrItems',
'estimatedCost',
'rules',
'variables',
'collections',
]
+ # Test explain invalid query
+ with assert_raises(AQLQueryExplainError) as err:
+ db.aql.explain('INVALID QUERY')
+ assert err.value.error_code == 1501
- # Test invalid query
- with pytest.raises(AQLQueryExplainError):
- db.aql.explain('THIS IS AN INVALID QUERY')
+ # Test explain valid query with all_plans set to False
+ plan = db.aql.explain(
+ 'FOR d IN {} RETURN d'.format(col.name),
+ all_plans=False,
+ opt_rules=['-all', '+use-index-range']
+ )
+ assert all(field in plan for field in plan_fields)
- # Test valid query (all_plans=True)
+ # Test explain valid query with all_plans set to True
plans = db.aql.explain(
- 'FOR d IN {} RETURN d'.format(col_name),
+ 'FOR d IN {} RETURN d'.format(col.name),
all_plans=True,
opt_rules=['-all', '+use-index-range'],
max_plans=10
)
for plan in plans:
- for field in fields_to_check:
- assert field in plan
-
- # Test valid query (all_plans=False)
- plan = db.aql.explain(
- 'FOR d IN {} RETURN d'.format(col_name),
- all_plans=False,
- opt_rules=['-all', '+use-index-range']
- )
- for field in fields_to_check:
- assert field in plan
-
+ assert all(field in plan for field in plan_fields)
+ assert len(plans) < 10
-@pytest.mark.order3
-def test_query_validate():
- # Test invalid query
- with pytest.raises(AQLQueryValidateError):
- db.aql.validate('THIS IS AN INVALID QUERY')
+ # Test validate invalid query
+ with assert_raises(AQLQueryValidateError) as err:
+ db.aql.validate('INVALID QUERY')
+ assert err.value.error_code == 1501
- # Test valid query
- result = db.aql.validate('FOR d IN {} RETURN d'.format(col_name))
+ # Test validate valid query
+ result = db.aql.validate('FOR d IN {} RETURN d'.format(col.name))
assert 'ast' in result
- assert 'bindVars' in result
+ assert 'bind_vars' in result
assert 'collections' in result
assert 'parsed' in result
-
-@pytest.mark.order4
-def test_query_execute():
- # Test invalid AQL query
- with pytest.raises(AQLQueryExecuteError):
- db.aql.execute('THIS IS AN INVALID QUERY')
-
- # Test valid AQL query #1
- db.collection(col_name).import_bulk([
- {'_key': 'doc01'},
- {'_key': 'doc02'},
- {'_key': 'doc03'},
- ])
- result = db.aql.execute(
- 'FOR d IN {} RETURN d'.format(col_name),
+ # Test execute invalid AQL query
+ with assert_raises(AQLQueryExecuteError) as err:
+ db.aql.execute('INVALID QUERY')
+ assert err.value.error_code == 1501
+
+ # Test execute valid query
+ db.collection(col.name).import_bulk(docs)
+ cursor = db.aql.execute(
+ '''
+ FOR d IN {col}
+ UPDATE {{_key: d._key, _val: @val }} IN {col}
+ RETURN NEW
+ '''.format(col=col.name),
count=True,
batch_size=1,
ttl=10,
- optimizer_rules=['+all']
- )
- assert set(d['_key'] for d in result) == {'doc01', 'doc02', 'doc03'}
-
- # Test valid AQL query #2
- db.collection(col_name).import_bulk([
- {'_key': 'doc04', 'value': 1},
- {'_key': 'doc05', 'value': 1},
- {'_key': 'doc06', 'value': 3},
- ])
- result = db.aql.execute(
- 'FOR d IN {} FILTER d.value == @value RETURN d'.format(col_name),
- bind_vars={'value': 1},
- count=True,
+ bind_vars={'val': 42},
full_count=True,
- max_plans=100
+ max_plans=1000,
+ optimizer_rules=['+all'],
+ cache=True,
+ memory_limit=1000000,
+ fail_on_warning=False,
+ profile=True,
+ max_transaction_size=100000,
+ max_warning_count=10,
+ intermediate_commit_count=1,
+ intermediate_commit_size=1000,
+ satellite_sync_wait=False,
+ write_collections=[col.name],
+ read_collections=[col.name]
)
- assert set(d['_key'] for d in result) == {'doc04', 'doc05'}
-
-
-@pytest.mark.order5
-def test_query_function_create_and_list():
- global func_name, func_body
-
- assert db.aql.functions() == {}
- func_name = 'myfunctions::temperature::celsiustofahrenheit'
- func_body = 'function (celsius) { return celsius * 1.8 + 32; }'
+ if db.context == 'transaction':
+ assert cursor.id is None
+ assert cursor.type == 'cursor'
+ assert cursor.batch() is not None
+ assert cursor.has_more() is False
+ assert cursor.count() == len(col)
+ assert cursor.cached() is None
+ assert cursor.statistics() is None
+ assert cursor.profile() is None
+ assert cursor.warnings() is None
+ assert extract('_key', cursor) == extract('_key', docs)
+ with assert_raises(CursorStateError) as err:
+ cursor.close()
+ assert err.value.message == 'cursor ID not set'
+ else:
+ assert cursor.id is not None
+ assert cursor.type == 'cursor'
+ assert cursor.batch() is not None
+ assert cursor.has_more() is True
+ assert cursor.count() == len(col)
+ assert cursor.cached() is False
+ assert cursor.statistics() is not None
+ assert cursor.profile() is not None
+ assert cursor.warnings() == []
+ assert extract('_key', cursor) == extract('_key', docs)
+ assert cursor.close(ignore_missing=True) is False
+
+ # Test get tracking properties with bad database
+ with assert_raises(AQLQueryTrackingGetError) as err:
+ bad_db.aql.tracking()
+ assert err.value.error_code == 1228
+
+ # Test get tracking properties
+ tracking = db.aql.tracking()
+ assert isinstance(tracking['enabled'], bool)
+ assert isinstance(tracking['max_query_string_length'], int)
+ assert isinstance(tracking['max_slow_queries'], int)
+ assert isinstance(tracking['slow_query_threshold'], int)
+ assert isinstance(tracking['track_bind_vars'], bool)
+ assert isinstance(tracking['track_slow_queries'], bool)
+
+ # Test set tracking properties with bad database
+ with assert_raises(AQLQueryTrackingSetError) as err:
+ bad_db.aql.set_tracking(enabled=not tracking['enabled'])
+ assert err.value.error_code == 1228
+ assert db.aql.tracking()['enabled'] == tracking['enabled']
+
+ # Test set tracking properties
+ new_tracking = db.aql.set_tracking(
+ enabled=not tracking['enabled'],
+ max_query_string_length=4000,
+ max_slow_queries=60,
+ slow_query_threshold=15,
+ track_bind_vars=not tracking['track_bind_vars'],
+ track_slow_queries=not tracking['track_slow_queries']
+ )
+ assert new_tracking['enabled'] != tracking['enabled']
+ assert new_tracking['max_query_string_length'] == 4000
+ assert new_tracking['max_slow_queries'] == 60
+ assert new_tracking['slow_query_threshold'] == 15
+ assert new_tracking['track_bind_vars'] != tracking['track_bind_vars']
+ assert new_tracking['track_slow_queries'] != tracking['track_slow_queries']
+
+ # Make sure to revert the properties
+ new_tracking = db.aql.set_tracking(
+ enabled=True,
+ track_bind_vars=True,
+ track_slow_queries=True
+ )
+ assert new_tracking['enabled'] is True
+ assert new_tracking['track_bind_vars'] is True
+ assert new_tracking['track_slow_queries'] is True
+
+ # Kick off some long lasting queries in the background
+ db.begin_async_execution().aql.execute('RETURN SLEEP(100)')
+ db.begin_async_execution().aql.execute('RETURN SLEEP(50)')
+
+ # Test list queries
+ queries = db.aql.queries()
+ for query in queries:
+ assert 'id' in query
+ assert 'query' in query
+ assert 'started' in query
+ assert 'state' in query
+ assert 'bind_vars' in query
+ assert 'runtime' in query
+ assert len(queries) == 2
+
+ # Test list queries with bad database
+ with assert_raises(AQLQueryListError) as err:
+ bad_db.aql.queries()
+ assert err.value.error_code == 1228
+
+ # Test kill queries
+ query_id_1, query_id_2 = extract('id', queries)
+ assert db.aql.kill(query_id_1) is True
+
+ while len(queries) > 1:
+ queries = db.aql.queries()
+ assert query_id_1 not in extract('id', queries)
+
+ assert db.aql.kill(query_id_2) is True
+ while len(queries) > 0:
+ queries = db.aql.queries()
+ assert query_id_2 not in extract('id', queries)
+
+ # Test kill missing queries
+ with assert_raises(AQLQueryKillError) as err:
+ db.aql.kill(query_id_1)
+ assert err.value.error_code == 1591
+ with assert_raises(AQLQueryKillError) as err:
+ db.aql.kill(query_id_2)
+ assert err.value.error_code == 1591
+
+ # Test list slow queries
+ assert db.aql.slow_queries() == []
+
+ # Test list slow queries with bad database
+ with assert_raises(AQLQueryListError) as err:
+ bad_db.aql.slow_queries()
+ assert err.value.error_code == 1228
+
+ # Test clear slow queries
+ assert db.aql.clear_slow_queries() is True
+
+ # Test clear slow queries with bad database
+ with assert_raises(AQLQueryClearError) as err:
+ bad_db.aql.clear_slow_queries()
+ assert err.value.error_code == 1228
+
+
+def test_aql_function_management(db, bad_db):
+ fn_group = 'functions::temperature'
+ fn_name_1 = 'functions::temperature::celsius_to_fahrenheit'
+ fn_body_1 = 'function (celsius) { return celsius * 1.8 + 32; }'
+ fn_name_2 = 'functions::temperature::fahrenheit_to_celsius'
+ fn_body_2 = 'function (fahrenheit) { return (fahrenheit - 32) / 1.8; }'
+ bad_fn_name = 'functions::temperature::should_not_exist'
+ bad_fn_body = 'function (celsius) { invalid syntax }'
+
+ # Test list AQL functions with bad database
+ with assert_raises(AQLFunctionListError) as err:
+ bad_db.aql.functions()
+ assert err.value.error_code == 1228
- # Test create AQL function
- db.aql.create_function(func_name, func_body)
- assert db.aql.functions() == {func_name: func_body}
+ # Test create invalid AQL function
+ with assert_raises(AQLFunctionCreateError) as err:
+ db.aql.create_function(bad_fn_name, bad_fn_body)
+ assert err.value.error_code == 1581
- # Test create AQL function again (idempotency)
- db.aql.create_function(func_name, func_body)
- assert db.aql.functions() == {func_name: func_body}
+ # Test create AQL function one
+ db.aql.create_function(fn_name_1, fn_body_1)
+ assert db.aql.functions() == {fn_name_1: fn_body_1}
- # Test create invalid AQL function
- func_body = 'function (celsius) { invalid syntax }'
- with pytest.raises(AQLFunctionCreateError):
- result = db.aql.create_function(func_name, func_body)
- assert result is True
+ # Test create AQL function one again (idempotency)
+ db.aql.create_function(fn_name_1, fn_body_1)
+ assert db.aql.functions() == {fn_name_1: fn_body_1}
+ # Test create AQL function two
+ db.aql.create_function(fn_name_2, fn_body_2)
+ assert db.aql.functions() == {fn_name_1: fn_body_1, fn_name_2: fn_body_2}
-@pytest.mark.order6
-def test_query_function_delete_and_list():
- # Test delete AQL function
- result = db.aql.delete_function(func_name)
- assert result is True
+ # Test delete AQL function one
+ assert db.aql.delete_function(fn_name_1) is True
+ assert db.aql.functions() == {fn_name_2: fn_body_2}
# Test delete missing AQL function
- with pytest.raises(AQLFunctionDeleteError):
- db.aql.delete_function(func_name)
-
- # Test delete missing AQL function (ignore_missing)
- result = db.aql.delete_function(func_name, ignore_missing=True)
- assert result is False
+ with assert_raises(AQLFunctionDeleteError) as err:
+ db.aql.delete_function(fn_name_1)
+ assert err.value.error_code == 1582
+ assert db.aql.delete_function(fn_name_1, ignore_missing=True) is False
+ assert db.aql.functions() == {fn_name_2: fn_body_2}
+
+ # Test delete AQL function group
+ assert db.aql.delete_function(fn_group, group=True) is True
assert db.aql.functions() == {}
-@pytest.mark.order7
-def test_get_query_cache_properties():
+def test_aql_cache_management(db, bad_db):
+ # Test get AQL cache properties
properties = db.aql.cache.properties()
assert 'mode' in properties
assert 'limit' in properties
+ # Test get AQL cache properties with bad database
+ with assert_raises(AQLCachePropertiesError):
+ bad_db.aql.cache.properties()
-@pytest.mark.order8
-def test_set_query_cache_properties():
- properties = db.aql.cache.configure(
- mode='on', limit=100
- )
+ # Test get AQL cache configure properties
+ properties = db.aql.cache.configure(mode='on', limit=100)
assert properties['mode'] == 'on'
assert properties['limit'] == 100
@@ -179,38 +300,15 @@ def test_set_query_cache_properties():
assert properties['mode'] == 'on'
assert properties['limit'] == 100
+ # Test get AQL cache configure properties with bad database
+ with assert_raises(AQLCacheConfigureError):
+ bad_db.aql.cache.configure(mode='on')
-@pytest.mark.order9
-def test_clear_query_cache():
+ # Test get AQL cache clear
result = db.aql.cache.clear()
assert isinstance(result, bool)
-
-@pytest.mark.order10
-def test_aql_errors():
- bad_db_name = generate_db_name()
- bad_aql = arango_client.database(bad_db_name).aql
-
- with pytest.raises(ArangoError) as err:
- bad_aql.functions()
- assert isinstance(err.value, AQLFunctionListError) \
- or isinstance(err.value, AsyncExecuteError) \
- or isinstance(err.value, BatchExecuteError)
-
- with pytest.raises(ArangoError) as err:
- bad_aql.cache.properties()
- assert isinstance(err.value, AQLCachePropertiesError) \
- or isinstance(err.value, AsyncExecuteError) \
- or isinstance(err.value, BatchExecuteError)
-
- with pytest.raises(ArangoError) as err:
- bad_aql.cache.configure(mode='on')
- assert isinstance(err.value, AQLCacheConfigureError) \
- or isinstance(err.value, AsyncExecuteError) \
- or isinstance(err.value, BatchExecuteError)
-
- with pytest.raises(ArangoError) as err:
- bad_aql.cache.clear()
- assert isinstance(err.value, AQLCacheClearError) \
- or isinstance(err.value, AsyncExecuteError) \
- or isinstance(err.value, BatchExecuteError)
+ # Test get AQL cache clear with bad database
+ with assert_raises(AQLCacheClearError) as err:
+ bad_db.aql.cache.clear()
+ assert err.value.error_code == 1228
diff --git a/tests/test_async.py b/tests/test_async.py
index 51f89a45..84693f1e 100644
--- a/tests/test_async.py
+++ b/tests/test_async.py
@@ -1,336 +1,261 @@
from __future__ import absolute_import, unicode_literals
-from time import sleep, time
+import time
import pytest
from six import string_types
-from arango import ArangoClient
-from arango.aql import AQL
-from arango.collections import Collection
+from arango.database import AsyncDatabase
from arango.exceptions import (
AsyncExecuteError,
- AsyncJobCancelError,
AsyncJobClearError,
AsyncJobResultError,
AsyncJobStatusError,
AsyncJobListError,
+ AsyncJobCancelError,
AQLQueryExecuteError
)
-from arango.graph import Graph
-
-from .utils import (
- generate_db_name,
- generate_col_name
-)
+from arango.job import AsyncJob
+from tests.helpers import extract
-arango_client = ArangoClient()
-db_name = generate_db_name()
-db = arango_client.create_database(db_name)
-col_name = generate_col_name()
-col = db.create_collection(col_name)
-col.add_fulltext_index(fields=['val'])
+def wait_on_job(job):
+ """Block until the async job is done."""
+ while job.status() != 'done':
+ time.sleep(.05) # pragma: no cover
+ return job
-def teardown_module(*_):
- arango_client.delete_database(db_name, ignore_missing=True)
+def wait_on_jobs(db):
+ """Block until all async jobs are finished."""
+ while len(db.async_jobs('pending')) > 0:
+ time.sleep(.05) # pragma: no cover
-def setup_function(*_):
- col.truncate()
+def test_async_wrapper_attributes(db, col, username):
+ async_db = db.begin_async_execution()
+ assert isinstance(async_db, AsyncDatabase)
+ assert async_db.username == username
+ assert async_db.context == 'async'
+ assert async_db.db_name == db.name
+ assert async_db.name == db.name
+    assert repr(async_db) == '<AsyncDatabase {}>'.format(db.name)
-def wait_on_job(job):
- while job.status() == 'pending':
- pass
-
-
-@pytest.mark.order1
-def test_init():
- async = db.async(return_result=True)
-
- assert async.type == 'async'
- assert 'ArangoDB asynchronous execution' in repr(async)
- assert isinstance(async.aql, AQL)
- assert isinstance(async.graph('test'), Graph)
- assert isinstance(async.collection('test'), Collection)
-
-
-@pytest.mark.order2
-def test_async_execute_error():
- bad_db = arango_client.db(
- name=db_name,
- username='root',
- password='incorrect'
- )
- async = bad_db.async(return_result=False)
- with pytest.raises(AsyncExecuteError):
- async.collection(col_name).insert({'_key': '1', 'val': 1})
- with pytest.raises(AsyncExecuteError):
- async.collection(col_name).properties()
- with pytest.raises(AsyncExecuteError):
- async.aql.execute('FOR d IN {} RETURN d'.format(col_name))
-
-
-@pytest.mark.order3
-def test_async_inserts_without_result():
- # Test precondition
- assert len(col) == 0
-
- # Insert test documents asynchronously with return_result False
- async = db.async(return_result=False)
- job1 = async.collection(col_name).insert({'_key': '1', 'val': 1})
- job2 = async.collection(col_name).insert({'_key': '2', 'val': 2})
- job3 = async.collection(col_name).insert({'_key': '3', 'val': 3})
+ async_col = async_db.collection(col.name)
+ assert async_col.username == username
+ assert async_col.context == 'async'
+ assert async_col.db_name == db.name
+ assert async_col.name == col.name
- # Ensure that no jobs were returned
- for job in [job1, job2, job3]:
- assert job is None
+ async_aql = async_db.aql
+ assert async_aql.username == username
+ assert async_aql.context == 'async'
+ assert async_aql.db_name == db.name
- # Ensure that the asynchronously requests went through
- sleep(0.5)
- assert len(col) == 3
- assert col['1']['val'] == 1
- assert col['2']['val'] == 2
- assert col['3']['val'] == 3
+ job = async_aql.execute('INVALID QUERY')
+ assert isinstance(job, AsyncJob)
+ assert isinstance(job.id, string_types)
+    assert repr(job) == '<AsyncJob {}>'.format(job.id)
-@pytest.mark.order4
-def test_async_inserts_with_result():
- # Test precondition
- assert len(col) == 0
+def test_async_execute_without_result(db, col, docs):
+ # Insert test documents asynchronously with return_result set to False
+ async_col = db.begin_async_execution(return_result=False).collection(
+ col.name)
- # Insert test documents asynchronously with return_result True
- async_col = db.async(return_result=True).collection(col_name)
- test_docs = [{'_key': str(i), 'val': str(i * 42)} for i in range(10000)]
- job1 = async_col.insert_many(test_docs, sync=True)
- job2 = async_col.insert_many(test_docs, sync=True)
- job3 = async_col.insert_many(test_docs, sync=True)
+ # Ensure that no jobs were returned
+ assert async_col.insert(docs[0]) is None
+ assert async_col.insert(docs[1]) is None
+ assert async_col.insert(docs[2]) is None
- # Test get result from a pending job
- with pytest.raises(AsyncJobResultError) as err:
- job3.result()
- assert 'Job {} not done'.format(job3.id) in err.value.message
+ # Ensure that the operations went through
+ wait_on_jobs(db)
+ assert extract('_key', col.all()) == ['1', '2', '3']
- # Test get result from finished but with existing jobs
- for job in [job1, job2, job3]:
- assert 'ArangoDB asynchronous job {}'.format(job.id) in repr(job)
- assert isinstance(job.id, string_types)
- wait_on_job(job)
- assert len(job.result()) == 10000
- # Test get result from missing jobs
- for job in [job1, job2, job3]:
- with pytest.raises(AsyncJobResultError) as err:
- job.result()
- assert 'Job {} missing'.format(job.id) in err.value.message
+def test_async_execute_error_in_result(db, col, docs):
+ db.collection(col.name).import_bulk(docs)
+ async_db = db.begin_async_execution(return_result=True)
- # Test get result without authentication
- setattr(getattr(job1, '_conn'), '_password', 'incorrect')
- with pytest.raises(AsyncJobResultError) as err:
+ # Test async execution of a bad AQL query
+ job = wait_on_job(async_db.aql.execute('INVALID QUERY'))
+ with pytest.raises(AQLQueryExecuteError) as err:
job.result()
- assert '401' in err.value.message
-
- # Retrieve the results of the jobs
- assert len(col) == 10000
-
-
-@pytest.mark.order5
-def test_async_query():
- # Set up test documents
- async = db.async(return_result=True)
- wait_on_job(async.collection(col_name).import_bulk([
- {'_key': '1', 'val': 1},
- {'_key': '2', 'val': 2},
- {'_key': '3', 'val': 3},
- ]))
-
- # Test asynchronous execution of an invalid AQL query
- job = async.aql.execute('THIS IS AN INVALID QUERY')
- wait_on_job(job)
- assert isinstance(job.result(), AQLQueryExecuteError)
-
- # Test asynchronous execution of a valid AQL query
- job = async.aql.execute(
- 'FOR d IN {} RETURN d'.format(col_name),
- count=True,
- batch_size=1,
- ttl=10,
- optimizer_rules=['+all']
- )
- wait_on_job(job)
- assert set(d['_key'] for d in job.result()) == {'1', '2', '3'}
-
- # Test asynchronous execution of another valid AQL query
- job = async.aql.execute(
- 'FOR d IN {} FILTER d.val == @value RETURN d'.format(col_name),
- bind_vars={'value': 1},
- count=True
- )
- wait_on_job(job)
- assert set(d['_key'] for d in job.result()) == {'1'}
-
-
-@pytest.mark.order6
-def test_async_get_status():
- async_col = db.async(return_result=True).collection(col_name)
- test_docs = [{'_key': str(i), 'val': str(i * 42)} for i in range(10000)]
+ assert err.value.error_code == 1501
+
+
+def test_async_get_job_status(db, bad_db):
+ async_db = db.begin_async_execution(return_result=True)
# Test get status of a pending job
- job = async_col.insert_many(test_docs, sync=True)
+ job = async_db.aql.execute('RETURN SLEEP(0.1)', count=True)
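+    # SLEEP(0.1) keeps the query running briefly, so the job should still be pending.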
assert job.status() == 'pending'
# Test get status of a finished job
- wait_on_job(job)
- assert job.status() == 'done'
- assert len(job.result()) == len(test_docs)
+ assert wait_on_job(job).status() == 'done'
+ assert job.result().count() == 1
# Test get status of a missing job
with pytest.raises(AsyncJobStatusError) as err:
job.status()
- assert 'Job {} missing'.format(job.id) in err.value.message
+ assert err.value.error_code == 404
- # Test get status without authentication
- setattr(getattr(job, '_conn'), '_password', 'incorrect')
+ # Test get status from invalid job
+ bad_job = wait_on_job(async_db.aql.execute('INVALID QUERY'))
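+    # Attach the bad database's connection so the status call fails authorization.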
+ bad_job._conn = bad_db._conn
with pytest.raises(AsyncJobStatusError) as err:
- job.status()
- assert 'HTTP 401' in err.value.message
-
-
-# @pytest.mark.order7
-# def test_cancel_async_job():
-# async_col = db.async(return_result=True).collection(col_name)
-# test_docs = [{'_key': str(i), 'val': str(i * 42)} for i in range(1)]
-#
-# job1 = async_col.insert_many(test_docs, sync=True)
-# job2 = async_col.insert_many(test_docs, sync=True)
-# job3 = async_col.insert_many(test_docs, sync=True)
-#
-# # Test cancel a pending job
-# assert job3.cancel() is True
-#
-# # Test cancel a finished job
-# for job in [job1, job2]:
-# wait_on_job(job)
-# assert job.status() == 'done'
-# with pytest.raises(AsyncJobCancelError) as err:
-# job.cancel()
-# assert 'Job {} missing'.format(job.id) in err.value.message
-# assert job.cancel(ignore_missing=True) is False
-#
-# # Test cancel a cancelled job
-# sleep(0.5)
-# with pytest.raises(AsyncJobCancelError) as err:
-# job3.cancel(ignore_missing=False)
-# assert 'Job {} missing'.format(job3.id) in err.value.message
-# assert job3.cancel(ignore_missing=True) is False
-#
-# # Test cancel without authentication
-# setattr(getattr(job1, '_conn'), '_password', 'incorrect')
-# with pytest.raises(AsyncJobCancelError) as err:
-# job1.cancel(ignore_missing=False)
-# assert 'HTTP 401' in err.value.message
-
-
-@pytest.mark.order8
-def test_clear_async_job():
- # Setup test asynchronous jobs
- async = db.async(return_result=True)
- job1 = async.collection(col_name).insert({'_key': '1', 'val': 1})
- job2 = async.collection(col_name).insert({'_key': '2', 'val': 2})
- job3 = async.collection(col_name).insert({'_key': '3', 'val': 3})
- for job in [job1, job2, job3]:
- wait_on_job(job)
+ bad_job.status()
+ assert err.value.error_code == 1228
- # Test clear finished jobs
- assert job1.clear(ignore_missing=True) is True
- assert job2.clear(ignore_missing=True) is True
- assert job3.clear(ignore_missing=False) is True
- # Test clear missing jobs
- for job in [job1, job2, job3]:
- with pytest.raises(AsyncJobClearError) as err:
- job.clear(ignore_missing=False)
- assert 'Job {} missing'.format(job.id) in err.value.message
- assert job.clear(ignore_missing=True) is False
+def test_async_get_job_result(db, bad_db):
+ async_db = db.begin_async_execution(return_result=True)
+
+ # Test get result from a pending job
+ job = async_db.aql.execute('RETURN SLEEP(0.1)', count=True)
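+    # While the query is still running, fetching the result yields HTTP 204 (not done).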
+ with pytest.raises(AsyncJobResultError) as err:
+ job.result()
+ assert err.value.http_code == 204
+ assert '{} not done'.format(job.id) in str(err.value)
+
+ # Test get result from a finished job
+ assert wait_on_job(job).result().count() == 1
+
+ # Test get result from a cleared job
+ with pytest.raises(AsyncJobResultError) as err:
+ job.result()
+ assert err.value.error_code == 404
- # Test clear without authentication
- setattr(getattr(job1, '_conn'), '_password', 'incorrect')
+ # Test get result from an invalid job
+ bad_job = async_db.aql.execute('INVALID QUERY')
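+    # Swap in the bad database's connection to force an authorization failure (1228).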
+ bad_job._conn = bad_db._conn
+ with pytest.raises(AsyncJobResultError) as err:
+ bad_job.result()
+ assert err.value.error_code == 1228
+
+
+def test_async_cancel_job(db, bad_db):
+ async_db = db.begin_async_execution(return_result=True)
+
+ # Start a long running request to ensure that job can be cancelled
+ job = async_db.aql.execute('RETURN SLEEP(5)')
+
+ # Test cancel a pending job
+ assert job.cancel() is True
+
+ # Test cancel a missing job
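+    # Forging the job ID makes the server report the job as missing (HTTP 404).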
+ job._id = 'invalid_id'
+ with pytest.raises(AsyncJobCancelError) as err:
+ job.cancel(ignore_missing=False)
+ assert err.value.error_code == 404
+ assert job.cancel(ignore_missing=True) is False
+
+ # Test cancel an invalid job
+ job = async_db.aql.execute('RETURN SLEEP(5)')
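+    # A connection with bad credentials should make the cancel call fail with 1228.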
+ job._conn = bad_db._conn
+ with pytest.raises(AsyncJobCancelError) as err:
+ job.cancel()
+ assert err.value.error_code == 1228
+
+
+def test_async_clear_job(db, bad_db):
+ async_db = db.begin_async_execution(return_result=True)
+
+ job = async_db.aql.execute('RETURN 1')
+
+ # Test clear finished job
+ assert job.clear(ignore_missing=True) is True
+
+ # Test clear missing job
with pytest.raises(AsyncJobClearError) as err:
- job1.clear(ignore_missing=False)
- assert 'HTTP 401' in err.value.message
+ job.clear(ignore_missing=False)
+ assert err.value.error_code == 404
+ assert job.clear(ignore_missing=True) is False
+ # Test clear with an invalid job
+ job._conn = bad_db._conn
+ with pytest.raises(AsyncJobClearError) as err:
+ job.clear()
+ assert err.value.error_code == 1228
+
+
+def test_async_execute_errors(bad_db):
+ bad_async_db = bad_db.begin_async_execution(return_result=False)
+ with pytest.raises(AsyncExecuteError) as err:
+ bad_async_db.aql.execute('RETURN 1')
+ assert err.value.error_code == 1228
+
+ bad_async_db = bad_db.begin_async_execution(return_result=True)
+ with pytest.raises(AsyncExecuteError) as err:
+ bad_async_db.aql.execute('RETURN 1')
+ assert err.value.error_code == 1228
-@pytest.mark.order9
-def test_clear_async_jobs():
- # Set up test documents
- async = db.async(return_result=True)
- job1 = async.collection(col_name).insert({'_key': '1', 'val': 1})
- job2 = async.collection(col_name).insert({'_key': '2', 'val': 2})
- job3 = async.collection(col_name).insert({'_key': '3', 'val': 3})
- for job in [job1, job2, job3]:
- wait_on_job(job)
- assert job.status() == 'done'
+
+def test_async_clear_jobs(db, bad_db, col, docs):
+ async_db = db.begin_async_execution(return_result=True)
+ async_col = async_db.collection(col.name)
+
+ job1 = wait_on_job(async_col.insert(docs[0]))
+ job2 = wait_on_job(async_col.insert(docs[1]))
+ job3 = wait_on_job(async_col.insert(docs[2]))
# Test clear all async jobs
- assert arango_client.clear_async_jobs() is True
+ assert db.clear_async_jobs() is True
for job in [job1, job2, job3]:
with pytest.raises(AsyncJobStatusError) as err:
job.status()
- assert 'Job {} missing'.format(job.id) in err.value.message
+ assert err.value.error_code == 404
# Set up test documents again
- async = db.async(return_result=True)
- job1 = async.collection(col_name).insert({'_key': '1', 'val': 1})
- job2 = async.collection(col_name).insert({'_key': '2', 'val': 2})
- job3 = async.collection(col_name).insert({'_key': '3', 'val': 3})
- for job in [job1, job2, job3]:
- wait_on_job(job)
- assert job.status() == 'done'
+ job1 = wait_on_job(async_col.insert(docs[0]))
+ job2 = wait_on_job(async_col.insert(docs[1]))
+ job3 = wait_on_job(async_col.insert(docs[2]))
- # Test clear jobs that have not expired yet
- past = int(time()) - 1000000
- assert arango_client.clear_async_jobs(threshold=past) is True
+ # Test clear jobs that have expired
+ past = int(time.time()) - 1000000
+ assert db.clear_async_jobs(threshold=past) is True
for job in [job1, job2, job3]:
assert job.status() == 'done'
- future = int(time()) + 1000000
- assert arango_client.clear_async_jobs(threshold=future) is True
+ # Test clear jobs that have not expired yet
+ future = int(time.time()) + 1000000
+ assert db.clear_async_jobs(threshold=future) is True
for job in [job1, job2, job3]:
with pytest.raises(AsyncJobStatusError) as err:
job.status()
- assert 'Job {} missing'.format(job.id) in err.value.message
+ assert err.value.error_code == 404
- # Test clear job without authentication
+ # Test clear job with bad database
with pytest.raises(AsyncJobClearError) as err:
- ArangoClient(password='incorrect').clear_async_jobs()
- assert 'HTTP 401' in err.value.message
-
-
-@pytest.mark.order10
-def test_list_async_jobs():
- # Set up test documents
- async = db.async(return_result=True)
- job1 = async.collection(col_name).insert({'_key': '1', 'val': 1})
- job2 = async.collection(col_name).insert({'_key': '2', 'val': 2})
- job3 = async.collection(col_name).insert({'_key': '3', 'val': 3})
- jobs = [job1, job2, job3]
- for job in jobs:
- wait_on_job(job)
- expected_job_ids = [job.id for job in jobs]
+ bad_db.clear_async_jobs()
+ assert err.value.error_code == 1228
+
+
+def test_async_list_jobs(db, col, docs):
+ async_db = db.begin_async_execution(return_result=True)
+ async_col = async_db.collection(col.name)
+
+ job1 = wait_on_job(async_col.insert(docs[0]))
+ job2 = wait_on_job(async_col.insert(docs[1]))
+ job3 = wait_on_job(async_col.insert(docs[2]))
# Test list async jobs that are done
- job_ids = arango_client.async_jobs(status='done')
- assert sorted(expected_job_ids) == sorted(job_ids)
+ job_ids = db.async_jobs(status='done')
+ assert job1.id in job_ids
+ assert job2.id in job_ids
+ assert job3.id in job_ids
# Test list async jobs that are pending
- assert arango_client.async_jobs(status='pending') == []
+ job4 = async_db.aql.execute('RETURN SLEEP(0.1)')
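+    # SLEEP keeps job4 running long enough to appear in the pending list.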
+ assert db.async_jobs(status='pending') == [job4.id]
+ wait_on_job(job4) # Make sure the job is done
# Test list async jobs with invalid status
- with pytest.raises(AsyncJobListError):
- arango_client.async_jobs(status='bad_status')
+ with pytest.raises(AsyncJobListError) as err:
+ db.async_jobs(status='bad_status')
+ assert err.value.error_code == 400
# Test list jobs with count
- job_ids = arango_client.async_jobs(status='done', count=1)
+ job_ids = db.async_jobs(status='done', count=1)
assert len(job_ids) == 1
- assert job_ids[0] in expected_job_ids
+ assert job_ids[0] in [job1.id, job2.id, job3.id, job4.id]
diff --git a/tests/test_async_new.py b/tests/test_async_new.py
deleted file mode 100644
index f4a60709..00000000
--- a/tests/test_async_new.py
+++ /dev/null
@@ -1,411 +0,0 @@
-from __future__ import absolute_import, unicode_literals
-
-from time import sleep, time
-
-import pytest
-from six import string_types
-
-from arango import ArangoClient
-from arango.aql import AQL
-from arango.collections import Collection
-from arango.exceptions import (
- AsyncExecuteError,
- AsyncJobCancelError,
- AsyncJobClearError,
- AsyncJobResultError,
- AsyncJobStatusError,
- AsyncJobListError,
- AQLQueryExecuteError
-)
-from arango.graph import Graph
-
-from .utils import (
- generate_db_name,
- generate_col_name
-)
-
-arango_client = ArangoClient()
-db_name = generate_db_name()
-db = arango_client.create_database(db_name)
-col_name = generate_col_name()
-col = db.create_collection(col_name)
-col.add_fulltext_index(fields=['val'])
-
-
-def teardown_module(*_):
- arango_client.delete_database(db_name, ignore_missing=True)
-
-
-def setup_function(*_):
- col.truncate()
-
-
-def wait_on_job(job):
- while job.status() == 'pending':
- pass
-
-
-@pytest.mark.order1
-def test_init():
- asynchronous = db.asynchronous(return_result=True)
-
- assert asynchronous.type == 'async'
- assert 'ArangoDB asynchronous execution' in repr(asynchronous)
- assert isinstance(asynchronous.aql, AQL)
- assert isinstance(asynchronous.graph('test'), Graph)
- assert isinstance(asynchronous.collection('test'), Collection)
-
-
-@pytest.mark.order2
-def test_async_execute_error():
- bad_db = arango_client.db(
- name=db_name,
- username='root',
- password='incorrect'
- )
- asynchronous = bad_db.asynchronous(return_result=False)
- with pytest.raises(AsyncExecuteError):
- asynchronous.collection(col_name).insert({'_key': '1', 'val': 1})
- with pytest.raises(AsyncExecuteError):
- asynchronous.collection(col_name).properties()
- with pytest.raises(AsyncExecuteError):
- asynchronous.aql.execute('FOR d IN {} RETURN d'.format(col_name))
-
-
-@pytest.mark.order3
-def test_async_inserts_without_result():
- # Test precondition
- assert len(col) == 0
-
- # Insert test documents asynchronously with return_result False
- asynchronous = db.asynchronous(return_result=False)
- job1 = asynchronous.collection(col_name).insert({'_key': '1', 'val': 1})
- job2 = asynchronous.collection(col_name).insert({'_key': '2', 'val': 2})
- job3 = asynchronous.collection(col_name).insert({'_key': '3', 'val': 3})
-
- # Ensure that no jobs were returned
- for job in [job1, job2, job3]:
- assert job is None
-
- # Ensure that the asynchronously requests went through
- sleep(0.5)
- assert len(col) == 3
- assert col['1']['val'] == 1
- assert col['2']['val'] == 2
- assert col['3']['val'] == 3
-
-
-@pytest.mark.order4
-def test_async_inserts_with_result():
- # Test precondition
- assert len(col) == 0
-
- # Insert test documents asynchronously with return_result True
- async_col = db.asynchronous(return_result=True).collection(col_name)
- test_docs = [{'_key': str(i), 'val': str(i * 42)} for i in range(10000)]
- job1 = async_col.insert_many(test_docs, sync=True)
- job2 = async_col.insert_many(test_docs, sync=True)
- job3 = async_col.insert_many(test_docs, sync=True)
-
- # Test get result from a pending job
- with pytest.raises(AsyncJobResultError) as err:
- job3.result()
- assert 'Job {} not done'.format(job3.id) in err.value.message
-
- # Test get result from finished but with existing jobs
- for job in [job1, job2, job3]:
- assert 'ArangoDB asynchronous job {}'.format(job.id) in repr(job)
- assert isinstance(job.id, string_types)
- wait_on_job(job)
- assert len(job.result()) == 10000
-
- # Test get result from missing jobs
- for job in [job1, job2, job3]:
- with pytest.raises(AsyncJobResultError) as err:
- job.result()
- assert 'Job {} missing'.format(job.id) in err.value.message
-
- # Test get result without authentication
- setattr(getattr(job1, '_conn'), '_password', 'incorrect')
- with pytest.raises(AsyncJobResultError) as err:
- job.result()
- assert '401' in err.value.message
-
- # Retrieve the results of the jobs
- assert len(col) == 10000
-
-
-@pytest.mark.order5
-def test_async_query():
- # Set up test documents
- asynchronous = db.asynchronous(return_result=True)
- wait_on_job(asynchronous.collection(col_name).import_bulk([
- {'_key': '1', 'val': 1},
- {'_key': '2', 'val': 2},
- {'_key': '3', 'val': 3},
- ]))
-
- # Test asynchronous execution of an invalid AQL query
- job = asynchronous.aql.execute('THIS IS AN INVALID QUERY')
- wait_on_job(job)
- assert isinstance(job.result(), AQLQueryExecuteError)
-
- # Test asynchronous execution of a valid AQL query
- job = asynchronous.aql.execute(
- 'FOR d IN {} RETURN d'.format(col_name),
- count=True,
- batch_size=1,
- ttl=10,
- optimizer_rules=['+all']
- )
- wait_on_job(job)
- assert set(d['_key'] for d in job.result()) == {'1', '2', '3'}
-
- # Test asynchronous execution of another valid AQL query
- job = asynchronous.aql.execute(
- 'FOR d IN {} FILTER d.val == @value RETURN d'.format(col_name),
- bind_vars={'value': 1},
- count=True
- )
- wait_on_job(job)
- assert set(d['_key'] for d in job.result()) == {'1'}
-
-
-@pytest.mark.order6
-def test_async_get_status():
- async_col = db.asynchronous(return_result=True).collection(col_name)
- test_docs = [{'_key': str(i), 'val': str(i * 42)} for i in range(10000)]
-
- # Test get status of a pending job
- job = async_col.insert_many(test_docs, sync=True)
- assert job.status() == 'pending'
-
- # Test get status of a finished job
- wait_on_job(job)
- assert job.status() == 'done'
- assert len(job.result()) == len(test_docs)
-
- # Test get status of a missing job
- with pytest.raises(AsyncJobStatusError) as err:
- job.status()
- assert 'Job {} missing'.format(job.id) in err.value.message
-
- # Test get status without authentication
- setattr(getattr(job, '_conn'), '_password', 'incorrect')
- with pytest.raises(AsyncJobStatusError) as err:
- job.status()
- assert 'HTTP 401' in err.value.message
-
-
-# @pytest.mark.order7
-# def test_cancel_async_job():
-# async_col = db.asynchronous(return_result=True).collection(col_name)
-# test_docs = [{'_key': str(i), 'val': str(i * 42)} for i in range(1)]
-#
-# job1 = async_col.insert_many(test_docs, sync=True)
-# job2 = async_col.insert_many(test_docs, sync=True)
-# job3 = async_col.insert_many(test_docs, sync=True)
-#
-# # Test cancel a pending job
-# assert job3.cancel() is True
-#
-# # Test cancel a finished job
-# for job in [job1, job2]:
-# wait_on_job(job)
-# assert job.status() == 'done'
-# with pytest.raises(AsyncJobCancelError) as err:
-# job.cancel()
-# assert 'Job {} missing'.format(job.id) in err.value.message
-# assert job.cancel(ignore_missing=True) is False
-#
-# # Test cancel a cancelled job
-# sleep(0.5)
-# with pytest.raises(AsyncJobCancelError) as err:
-# job3.cancel(ignore_missing=False)
-# assert 'Job {} missing'.format(job3.id) in err.value.message
-# assert job3.cancel(ignore_missing=True) is False
-#
-# # Test cancel without authentication
-# setattr(getattr(job1, '_conn'), '_password', 'incorrect')
-# with pytest.raises(AsyncJobCancelError) as err:
-# job1.cancel(ignore_missing=False)
-# assert 'HTTP 401' in err.value.message
-
-
-@pytest.mark.order8
-def test_clear_async_job():
- # Setup test asynchronous jobs
- asynchronous = db.asynchronous(return_result=True)
- job1 = asynchronous.collection(col_name).insert({'_key': '1', 'val': 1})
- job2 = asynchronous.collection(col_name).insert({'_key': '2', 'val': 2})
- job3 = asynchronous.collection(col_name).insert({'_key': '3', 'val': 3})
- for job in [job1, job2, job3]:
- wait_on_job(job)
-
- # Test clear finished jobs
- assert job1.clear(ignore_missing=True) is True
- assert job2.clear(ignore_missing=True) is True
- assert job3.clear(ignore_missing=False) is True
-
- # Test clear missing jobs
- for job in [job1, job2, job3]:
- with pytest.raises(AsyncJobClearError) as err:
- job.clear(ignore_missing=False)
- assert 'Job {} missing'.format(job.id) in err.value.message
- assert job.clear(ignore_missing=True) is False
-
- # Test clear without authentication
- setattr(getattr(job1, '_conn'), '_password', 'incorrect')
- with pytest.raises(AsyncJobClearError) as err:
- job1.clear(ignore_missing=False)
- assert 'HTTP 401' in err.value.message
-
-
-@pytest.mark.order9
-def test_clear_async_jobs():
- # Set up test documents
- asynchronous = db.asynchronous(return_result=True)
- job1 = asynchronous.collection(col_name).insert({'_key': '1', 'val': 1})
- job2 = asynchronous.collection(col_name).insert({'_key': '2', 'val': 2})
- job3 = asynchronous.collection(col_name).insert({'_key': '3', 'val': 3})
- for job in [job1, job2, job3]:
- wait_on_job(job)
- assert job.status() == 'done'
-
- # Test clear all async jobs
- assert arango_client.clear_async_jobs() is True
- for job in [job1, job2, job3]:
- with pytest.raises(AsyncJobStatusError) as err:
- job.status()
- assert 'Job {} missing'.format(job.id) in err.value.message
-
- # Set up test documents again
- asynchronous = db.asynchronous(return_result=True)
- job1 = asynchronous.collection(col_name).insert({'_key': '1', 'val': 1})
- job2 = asynchronous.collection(col_name).insert({'_key': '2', 'val': 2})
- job3 = asynchronous.collection(col_name).insert({'_key': '3', 'val': 3})
- for job in [job1, job2, job3]:
- wait_on_job(job)
- assert job.status() == 'done'
-
- # Test clear jobs that have not expired yet
- past = int(time()) - 1000000
- assert arango_client.clear_async_jobs(threshold=past) is True
- for job in [job1, job2, job3]:
- assert job.status() == 'done'
-
- future = int(time()) + 1000000
- assert arango_client.clear_async_jobs(threshold=future) is True
- for job in [job1, job2, job3]:
- with pytest.raises(AsyncJobStatusError) as err:
- job.status()
- assert 'Job {} missing'.format(job.id) in err.value.message
-
- # Test clear job without authentication
- with pytest.raises(AsyncJobClearError) as err:
- ArangoClient(password='incorrect').clear_async_jobs()
- assert 'HTTP 401' in err.value.message
-
-
-@pytest.mark.order10
-def test_list_async_jobs():
- # Set up test documents
- asynchronous = db.asynchronous(return_result=True)
- job1 = asynchronous.collection(col_name).insert({'_key': '1', 'val': 1})
- job2 = asynchronous.collection(col_name).insert({'_key': '2', 'val': 2})
- job3 = asynchronous.collection(col_name).insert({'_key': '3', 'val': 3})
- jobs = [job1, job2, job3]
- for job in jobs:
- wait_on_job(job)
- expected_job_ids = [job.id for job in jobs]
-
- # Test list async jobs that are done
- job_ids = arango_client.async_jobs(status='done')
- assert sorted(expected_job_ids) == sorted(job_ids)
-
- # Test list async jobs that are pending
- assert arango_client.async_jobs(status='pending') == []
-
- # Test list async jobs with invalid status
- with pytest.raises(AsyncJobListError):
- arango_client.async_jobs(status='bad_status')
-
- # Test list jobs with count
- job_ids = arango_client.async_jobs(status='done', count=1)
- assert len(job_ids) == 1
- assert job_ids[0] in expected_job_ids
-
-
-@pytest.mark.order11
-def test_clear_async_jobs_db_level():
- # Set up test documents
- asynchronous = db.asynchronous(return_result=True)
- job1 = asynchronous.collection(col_name).insert({'_key': '1', 'val': 1})
- job2 = asynchronous.collection(col_name).insert({'_key': '2', 'val': 2})
- job3 = asynchronous.collection(col_name).insert({'_key': '3', 'val': 3})
- for job in [job1, job2, job3]:
- wait_on_job(job)
- assert job.status() == 'done'
-
- # Test clear all async jobs
- assert db.clear_async_jobs() is True
- for job in [job1, job2, job3]:
- with pytest.raises(AsyncJobStatusError) as err:
- job.status()
- assert 'Job {} missing'.format(job.id) in err.value.message
-
- # Set up test documents again
- asynchronous = db.asynchronous(return_result=True)
- job1 = asynchronous.collection(col_name).insert({'_key': '1', 'val': 1})
- job2 = asynchronous.collection(col_name).insert({'_key': '2', 'val': 2})
- job3 = asynchronous.collection(col_name).insert({'_key': '3', 'val': 3})
- for job in [job1, job2, job3]:
- wait_on_job(job)
- assert job.status() == 'done'
-
- # Test clear jobs that have not expired yet
- past = int(time()) - 1000000
- assert db.clear_async_jobs(threshold=past) is True
- for job in [job1, job2, job3]:
- assert job.status() == 'done'
-
- future = int(time()) + 1000000
- assert db.clear_async_jobs(threshold=future) is True
- for job in [job1, job2, job3]:
- with pytest.raises(AsyncJobStatusError) as err:
- job.status()
- assert 'Job {} missing'.format(job.id) in err.value.message
-
- # Test clear job without authentication
- with pytest.raises(AsyncJobClearError) as err:
- ArangoClient(password='incorrect').db(db_name).clear_async_jobs()
- assert 'HTTP 401' in err.value.message
-
-
-@pytest.mark.order12
-def test_list_async_jobs_db_level():
- # Set up test documents
- asynchronous = db.asynchronous(return_result=True)
- job1 = asynchronous.collection(col_name).insert({'_key': '1', 'val': 1})
- job2 = asynchronous.collection(col_name).insert({'_key': '2', 'val': 2})
- job3 = asynchronous.collection(col_name).insert({'_key': '3', 'val': 3})
- jobs = [job1, job2, job3]
- for job in jobs:
- wait_on_job(job)
- expected_job_ids = [job.id for job in jobs]
-
- # Test list async jobs that are done
- job_ids = db.async_jobs(status='done')
- assert sorted(expected_job_ids) == sorted(job_ids)
-
- # Test list async jobs that are pending
- assert db.async_jobs(status='pending') == []
-
- # Test list async jobs with invalid status
- with pytest.raises(AsyncJobListError):
- db.async_jobs(status='bad_status')
-
- # Test list jobs with count
- job_ids = db.async_jobs(status='done', count=1)
- assert len(job_ids) == 1
- assert job_ids[0] in expected_job_ids
diff --git a/tests/test_batch.py b/tests/test_batch.py
index 62986dfb..b932ee7c 100644
--- a/tests/test_batch.py
+++ b/tests/test_batch.py
@@ -1,228 +1,170 @@
from __future__ import absolute_import, unicode_literals
-from uuid import UUID
-
+import mock
import pytest
+from six import string_types
-from arango import ArangoClient
-from arango.aql import AQL
-from arango.collections import Collection
+from arango.database import BatchDatabase
from arango.exceptions import (
- DocumentRevisionError,
DocumentInsertError,
- BatchExecuteError
-)
-from arango.graph import Graph
-
-from .utils import (
- generate_db_name,
- generate_col_name,
+ BatchExecuteError,
+ BatchJobResultError,
+ BatchStateError
)
-
-arango_client = ArangoClient()
-db_name = generate_db_name()
-db = arango_client.create_database(db_name)
-bad_db_name = generate_db_name()
-bad_db = arango_client.db(bad_db_name)
-col_name = generate_col_name()
-col = db.create_collection(col_name)
-
-
-def teardown_module(*_):
- arango_client.delete_database(db_name, ignore_missing=True)
-
-
-def setup_function(*_):
- col.truncate()
-
-
-def test_init():
- batch = db.batch()
- assert batch.type == 'batch'
- assert 'ArangoDB batch execution {}'.format(batch.id) in repr(batch)
- assert isinstance(batch.aql, AQL)
- assert isinstance(batch.graph('test'), Graph)
- assert isinstance(batch.collection('test'), Collection)
-
-
-def test_batch_job_properties():
- with db.batch(return_result=True) as batch:
- batch_col = batch.collection(col_name)
- job = batch_col.insert({'_key': '1', 'val': 1})
-
- assert isinstance(job.id, UUID)
- assert 'ArangoDB batch job {}'.format(job.id) in repr(job)
-
-
-def test_batch_empty_commit():
- batch = db.batch(return_result=True)
- assert batch.commit() is None
-
-
-def test_batch_invalid_commit():
- assert len(col) == 0
- batch = bad_db.batch(return_result=True)
- batch_col = batch.collection(col_name)
- batch_col.insert({'_key': '1', 'val': 1})
- batch_col.insert({'_key': '2', 'val': 2})
- batch_col.insert({'_key': '2', 'val': 3})
-
- with pytest.raises(BatchExecuteError):
- batch.commit()
- assert len(col) == 0
-
-
-def test_batch_insert_context_manager_with_result():
- assert len(col) == 0
- with db.batch(return_result=True) as batch:
- batch_col = batch.collection(col_name)
- job1 = batch_col.insert({'_key': '1', 'val': 1})
- job2 = batch_col.insert({'_key': '2', 'val': 2})
- job3 = batch_col.insert({'_key': '2', 'val': 3})
- job4 = batch_col.get(key='2', rev='9999')
-
- assert len(col) == 2
- assert col['1']['val'] == 1
- assert col['2']['val'] == 2
-
- assert job1.status() == 'done'
- assert job1.result()['_key'] == '1'
-
- assert job2.status() == 'done'
- assert job2.result()['_key'] == '2'
-
- assert job3.status() == 'error'
- assert isinstance(job3.result(), DocumentInsertError)
-
- assert job4.status() == 'error'
- assert isinstance(job4.result(), DocumentRevisionError)
-
-
-def test_batch_insert_context_manager_without_result():
- assert len(col) == 0
- with db.batch(return_result=False) as batch:
- batch_col = batch.collection(col_name)
- job1 = batch_col.insert({'_key': '1', 'val': 1})
- job2 = batch_col.insert({'_key': '2', 'val': 2})
- job3 = batch_col.insert({'_key': '2', 'val': 3})
- assert len(col) == 2
- assert col['1']['val'] == 1
- assert col['2']['val'] == 2
- assert job1 is None
- assert job2 is None
- assert job3 is None
-
-
-def test_batch_insert_context_manager_commit_on_error():
- assert len(col) == 0
- try:
- with db.batch(return_result=True, commit_on_error=True) as batch:
- batch_col = batch.collection(col_name)
- job1 = batch_col.insert({'_key': '1', 'val': 1})
- raise ValueError('Error!')
- except ValueError:
- assert col['1']['val'] == 1
- assert job1.status() == 'done'
- assert job1.result()['_key'] == '1'
-
-
-def test_batch_insert_context_manager_no_commit_on_error():
- assert len(col) == 0
- try:
- with db.batch(return_result=True, commit_on_error=False) as batch:
- batch_col = batch.collection(col_name)
- job1 = batch_col.insert({'_key': '1', 'val': 1})
- raise ValueError('Error!')
- except ValueError:
- assert len(col) == 0
- assert job1.status() == 'pending'
- assert job1.result() is None
-
-
-def test_batch_insert_no_context_manager_with_result():
- assert len(col) == 0
- batch = db.batch(return_result=True)
- batch_col = batch.collection(col_name)
- job1 = batch_col.insert({'_key': '1', 'val': 1})
- job2 = batch_col.insert({'_key': '2', 'val': 2})
- job3 = batch_col.insert({'_key': '2', 'val': 3})
-
- assert len(col) == 0
- assert job1.status() == 'pending'
- assert job1.result() is None
-
- assert job2.status() == 'pending'
- assert job2.result() is None
-
- assert job3.status() == 'pending'
- assert job3.result() is None
-
- batch.commit()
- assert len(col) == 2
- assert col['1']['val'] == 1
- assert col['2']['val'] == 2
-
- assert job1.status() == 'done'
- assert job1.result()['_key'] == '1'
-
- assert job2.status() == 'done'
- assert job2.result()['_key'] == '2'
-
- assert job3.status() == 'error'
- assert isinstance(job3.result(), DocumentInsertError)
-
-
-def test_batch_insert_no_context_manager_without_result():
- assert len(col) == 0
- batch = db.batch(return_result=False)
- batch_col = batch.collection(col_name)
- job1 = batch_col.insert({'_key': '1', 'val': 1})
- job2 = batch_col.insert({'_key': '2', 'val': 2})
- job3 = batch_col.insert({'_key': '2', 'val': 3})
-
- assert job1 is None
- assert job2 is None
- assert job3 is None
-
- batch.commit()
- assert len(col) == 2
- assert col['1']['val'] == 1
- assert col['2']['val'] == 2
-
-
-def test_batch_query_context_manager_with_result():
- with db.batch(return_result=True, commit_on_error=False) as batch:
- job1 = batch.collection(col_name).import_bulk([
- {'_key': '1', 'val': 1},
- {'_key': '2', 'val': 2},
- {'_key': '3', 'val': 3},
- ])
- job2 = batch.aql.execute(
- 'FOR d IN {} RETURN d'.format(col_name),
- count=True,
- batch_size=1,
- ttl=10,
- optimizer_rules=['+all']
- )
- job3 = batch.aql.execute(
- 'FOR d IN {} FILTER d.val == @value RETURN d'.format(col_name),
- bind_vars={'value': 1},
- count=True
- )
- assert job1.result()['created'] == 3
- assert set(d['_key'] for d in job2.result()) == {'1', '2', '3'}
- assert set(d['_key'] for d in job3.result()) == {'1'}
-
-
-def test_batch_clear():
- assert len(col) == 0
- batch = db.batch(return_result=True)
- batch_col = batch.collection(col_name)
- job1 = batch_col.insert({'_key': '1', 'val': 1})
- job2 = batch_col.insert({'_key': '2', 'val': 2})
- batch.clear()
- batch.commit()
-
+from arango.job import BatchJob
+from tests.helpers import extract, clean_doc
+
+
+def test_batch_wrapper_attributes(db, col, username):
+ batch_db = db.begin_batch_execution()
+ assert isinstance(batch_db, BatchDatabase)
+ assert batch_db.username == username
+ assert batch_db.context == 'batch'
+ assert batch_db.db_name == db.name
+ assert batch_db.name == db.name
+    assert repr(batch_db) == '<BatchDatabase {}>'.format(db.name)
+
+ batch_col = batch_db.collection(col.name)
+ assert batch_col.username == username
+ assert batch_col.context == 'batch'
+ assert batch_col.db_name == db.name
+ assert batch_col.name == col.name
+
+ batch_aql = batch_db.aql
+ assert batch_aql.username == username
+ assert batch_aql.context == 'batch'
+ assert batch_aql.db_name == db.name
+
+ job = batch_aql.execute('INVALID QUERY')
+ assert isinstance(job, BatchJob)
+ assert isinstance(job.id, string_types)
+    assert repr(job) == '<BatchJob {}>'.format(job.id)
+
+
+def test_batch_execute_without_result(db, col, docs):
+ with db.begin_batch_execution(return_result=False) as batch_db:
+ batch_col = batch_db.collection(col.name)
+
+ # Ensure that no jobs are returned
+ assert batch_col.insert(docs[0]) is None
+ assert batch_col.delete(docs[0]) is None
+ assert batch_col.insert(docs[1]) is None
+ assert batch_col.delete(docs[1]) is None
+ assert batch_col.insert(docs[2]) is None
+ assert batch_col.get(docs[2]) is None
+ assert batch_db.queued_jobs() is None
+
+ # Ensure that the operations went through
+ assert batch_db.queued_jobs() is None
+ assert extract('_key', col.all()) == [docs[2]['_key']]
+
+
+def test_batch_execute_with_result(db, col, docs):
+ with db.begin_batch_execution(return_result=True) as batch_db:
+ batch_col = batch_db.collection(col.name)
+ job1 = batch_col.insert(docs[0])
+ job2 = batch_col.insert(docs[1])
+ job3 = batch_col.insert(docs[1]) # duplicate
+ jobs = batch_db.queued_jobs()
+ assert jobs == [job1, job2, job3]
+ assert all(job.status() == 'pending' for job in jobs)
+
+ assert batch_db.queued_jobs() == [job1, job2, job3]
+ assert all(job.status() == 'done' for job in batch_db.queued_jobs())
+ assert extract('_key', col.all()) == extract('_key', docs[:2])
+
+ # Test successful results
+ assert job1.result()['_key'] == docs[0]['_key']
+ assert job2.result()['_key'] == docs[1]['_key']
+
+ # Test insert error result
+ with pytest.raises(DocumentInsertError) as err:
+ job3.result()
+ assert err.value.error_code == 1210
+
+
+def test_batch_empty_commit(db):
+ batch_db = db.begin_batch_execution(return_result=False)
+ assert batch_db.commit() is None
+
+ batch_db = db.begin_batch_execution(return_result=True)
+ assert batch_db.commit() == []
+
+
+def test_batch_double_commit(db, col, docs):
+ batch_db = db.begin_batch_execution()
+ job = batch_db.collection(col.name).insert(docs[0])
+
+ # Test first commit
+ assert batch_db.commit() == [job]
+ assert job.status() == 'done'
+ assert len(col) == 1
+ assert clean_doc(col.random()) == docs[0]
+
+ # Test second commit which should fail
+ with pytest.raises(BatchStateError) as err:
+ batch_db.commit()
+ assert 'already committed' in str(err.value)
+ assert len(col) == 1
+ assert clean_doc(col.random()) == docs[0]
+
+
+def test_batch_action_after_commit(db, col):
+ with db.begin_batch_execution() as batch_db:
+ batch_db.collection(col.name).insert({})
+
+ # Test insert after the batch has been committed
+ with pytest.raises(BatchStateError) as err:
+ batch_db.collection(col.name).insert({})
+ assert 'already committed' in str(err.value)
+ assert len(col) == 1
+
+
+def test_batch_execute_error(bad_db, col, docs):
+ batch_db = bad_db.begin_batch_execution(return_result=True)
+ job = batch_db.collection(col.name).insert_many(docs)
+
+ # Test batch execute with bad database
+ with pytest.raises(BatchExecuteError) as err:
+ batch_db.commit()
+ assert err.value.error_code == 1228
assert len(col) == 0
- assert job1.status() == 'pending'
- assert job2.status() == 'pending'
+ assert job.status() == 'pending'
+
+
+def test_batch_job_result_not_ready(db, col, docs):
+ batch_db = db.begin_batch_execution(return_result=True)
+ job = batch_db.collection(col.name).insert_many(docs)
+
+ # Test get job result before commit
+ with pytest.raises(BatchJobResultError) as err:
+ job.result()
+ assert str(err.value) == 'result not available yet'
+
+ # Test commit to make sure it still works after the errors
+ assert batch_db.commit() == [job]
+ assert len(job.result()) == len(docs)
+ assert extract('_key', col.all()) == extract('_key', docs)
+
+
+def test_batch_bad_state(db, col, docs):
+ batch_db = db.begin_batch_execution()
+ batch_col = batch_db.collection(col.name)
+ batch_col.insert(docs[0])
+ batch_col.insert(docs[1])
+ batch_col.insert(docs[2])
+
+ # Monkey patch the connection object
+ mock_resp = mock.MagicMock()
+ mock_resp.is_success = True
+ mock_resp.raw_body = ''
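+    # An empty body parses into zero batch parts, tripping the state check below.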
+ mock_send_request = mock.MagicMock()
+ mock_send_request.return_value = mock_resp
+ mock_connection = mock.MagicMock()
+ mock_connection.send_request = mock_send_request
+ batch_db._executor._conn = mock_connection
+
+ # Test commit with invalid batch state
+ with pytest.raises(BatchStateError) as err:
+ batch_db.commit()
+ assert 'expecting 3 parts in batch response but got 0' in str(err.value)
diff --git a/tests/test_client.py b/tests/test_client.py
index 45b0d118..08ff31e7 100644
--- a/tests/test_client.py
+++ b/tests/test_client.py
@@ -1,254 +1,103 @@
from __future__ import absolute_import, unicode_literals
-from datetime import datetime
-
import pytest
-from six import string_types
-
-from arango import ArangoClient
-from arango.http_clients import DefaultHTTPClient
-from arango.database import Database
-from arango.exceptions import *
-
-from .utils import generate_db_name, arango_version
-
-http_client = DefaultHTTPClient(use_session=False)
-arango_client = ArangoClient(http_client=http_client)
-bad_arango_client = ArangoClient(username='root', password='incorrect')
-db_name = generate_db_name()
-
-
-def teardown_module(*_):
- arango_client.delete_database(db_name, ignore_missing=True)
-
-
-def test_verify():
- assert arango_client.verify() is True
- with pytest.raises(ServerConnectionError):
- ArangoClient(
- username='root',
- password='incorrect',
- verify=True
- )
-
-
-def test_properties():
- assert arango_client.protocol == 'http'
- assert arango_client.host == '127.0.0.1'
- assert arango_client.port == 8529
- assert arango_client.username == 'root'
- assert arango_client.password == ''
- assert arango_client.http_client == http_client
- assert arango_client.logging_enabled is True
- assert 'ArangoDB client for' in repr(arango_client)
-
-
-def test_version():
- version = arango_client.version()
- assert isinstance(version, string_types)
-
- with pytest.raises(ServerVersionError):
- bad_arango_client.version()
-
-
-def test_details():
- details = arango_client.details()
- assert 'architecture' in details
- assert 'server-version' in details
-
- with pytest.raises(ServerDetailsError):
- bad_arango_client.details()
-
-
-def test_required_db_version():
- version = arango_client.required_db_version()
- assert isinstance(version, string_types)
-
- with pytest.raises(ServerRequiredDBVersionError):
- bad_arango_client.required_db_version()
-
-
-def test_statistics():
- statistics = arango_client.statistics(description=False)
- assert isinstance(statistics, dict)
- assert 'time' in statistics
- assert 'system' in statistics
- assert 'server' in statistics
-
- description = arango_client.statistics(description=True)
- assert isinstance(description, dict)
- assert 'figures' in description
- assert 'groups' in description
-
- with pytest.raises(ServerStatisticsError):
- bad_arango_client.statistics()
-
-
-def test_role():
- assert arango_client.role() in {
- 'SINGLE',
- 'COORDINATOR',
- 'PRIMARY',
- 'SECONDARY',
- 'UNDEFINED'
- }
- with pytest.raises(ServerRoleError):
- bad_arango_client.role()
-
-
-def test_time():
- system_time = arango_client.time()
- assert isinstance(system_time, datetime)
-
- with pytest.raises(ServerTimeError):
- bad_arango_client.time()
-
-
-def test_echo():
- last_request = arango_client.echo()
- assert 'protocol' in last_request
- assert 'user' in last_request
- assert 'requestType' in last_request
- assert 'rawRequestBody' in last_request
-
- with pytest.raises(ServerEchoError):
- bad_arango_client.echo()
-
-
-def test_sleep():
- assert arango_client.sleep(0) == 0
-
- with pytest.raises(ServerSleepError):
- bad_arango_client.sleep(0)
-
-def test_execute():
- major, minor = arango_version(arango_client)
-
- # TODO ArangoDB 3.2 seems to be missing this API endpoint
- if not (major == 3 and minor == 2):
- assert arango_client.execute('return 1') == '1'
- assert arango_client.execute('return "test"') == '"test"'
- with pytest.raises(ServerExecuteError) as err:
- arango_client.execute('return invalid')
- assert 'Internal Server Error' in err.value.message
-
-
-# TODO test parameters
-def test_log():
- # Test read_log with default arguments
- log = arango_client.read_log(upto='fatal')
- assert 'lid' in log
- assert 'level' in log
- assert 'text' in log
- assert 'total_amount' in log
-
- # Test read_log with specific arguments
- log = arango_client.read_log(
- level='error',
- start=0,
- size=100000,
- offset=0,
- search='test',
- sort='desc',
+from arango.client import ArangoClient
+from arango.database import StandardDatabase
+from arango.exceptions import ServerConnectionError
+from arango.http import DefaultHTTPClient
+from arango.version import __version__
+from tests.helpers import (
+ generate_db_name,
+ generate_username,
+ generate_string
+)
+
+
+def test_client_attributes():
+ session = DefaultHTTPClient()
+ client = ArangoClient(
+ protocol='http',
+ host='127.0.0.1',
+ port=8529,
+ http_client=session
+ )
+ assert client.version == __version__
+ assert client.protocol == 'http'
+ assert client.host == '127.0.0.1'
+ assert client.port == 8529
+ assert client.base_url == 'http://127.0.0.1:8529'
+    assert repr(client) == '<ArangoClient http://127.0.0.1:8529>'
+
+
+def test_client_good_connection(db, username, password):
+ client = ArangoClient(
+ protocol='http',
+ host='127.0.0.1',
+ port=8529,
)
- assert 'lid' in log
- assert 'level' in log
- assert 'text' in log
- assert 'total_amount' in log
-
- # Test read_log with incorrect auth
- with pytest.raises(ServerReadLogError):
- bad_arango_client.read_log()
-
-
-def test_reload_routing():
- result = arango_client.reload_routing()
- assert isinstance(result, bool)
-
- with pytest.raises(ServerReloadRoutingError):
- bad_arango_client.reload_routing()
-
-
-def test_log_levels():
- major, minor = arango_version(arango_client)
- if major == 3 and minor >= 1:
-
- result = arango_client.log_levels()
- assert isinstance(result, dict)
-
- with pytest.raises(ServerLogLevelError):
- bad_arango_client.log_levels()
-
-
-def test_set_log_levels():
- major, minor = arango_version(arango_client)
- if major == 3 and minor >= 1:
-
- new_levels = {
- 'agency': 'DEBUG',
- 'collector': 'INFO',
- 'threads': 'WARNING'
- }
- result = arango_client.set_log_levels(**new_levels)
-
- for key, value in new_levels.items():
- assert result[key] == value
-
- for key, value in arango_client.log_levels().items():
- assert result[key] == value
-
- with pytest.raises(ServerLogLevelSetError):
- bad_arango_client.set_log_levels(**new_levels)
-
-
-def test_endpoints():
- endpoints = arango_client.endpoints()
- assert isinstance(endpoints, list)
- for endpoint in endpoints:
- assert 'endpoint' in endpoint
-
- with pytest.raises(ServerEndpointsError):
- bad_arango_client.endpoints()
-
-
-def test_database_management():
- # Test list databases
- # TODO something wrong here
- result = arango_client.databases()
- assert '_system' in result
- result = arango_client.databases(user_only=True)
- assert '_system' in result
- assert db_name not in arango_client.databases()
-
- with pytest.raises(DatabaseListError):
- bad_arango_client.databases()
-
- # Test create database
- result = arango_client.create_database(db_name)
- assert isinstance(result, Database)
- assert db_name in arango_client.databases()
-
- # Test get after create database
- assert isinstance(arango_client.db(db_name), Database)
- assert arango_client.db(db_name).name == db_name
-
- # Test create duplicate database
- with pytest.raises(DatabaseCreateError):
- arango_client.create_database(db_name)
-
- # Test list after create database
- assert db_name in arango_client.databases()
-
- # Test delete database
- result = arango_client.delete_database(db_name)
- assert result is True
- assert db_name not in arango_client.databases()
-
- # Test delete missing database
- with pytest.raises(DatabaseDeleteError):
- arango_client.delete_database(db_name)
- # Test delete missing database (ignore missing)
- result = arango_client.delete_database(db_name, ignore_missing=True)
- assert result is False
+ # Test connection with verify flag on and off
+ for verify in (True, False):
+        connected = client.db(db.name, username, password, verify=verify)
+        assert isinstance(connected, StandardDatabase)
+        assert connected.name == db.name
+        assert connected.username == username
+        assert connected.context == 'default'
+
+
+def test_client_bad_connection(db, username, password):
+ client = ArangoClient(protocol='http', host='127.0.0.1', port=8529)
+
+ bad_db_name = generate_db_name()
+ bad_username = generate_username()
+ bad_password = generate_string()
+
+ # Test connection with bad username password
+ with pytest.raises(ServerConnectionError) as err:
+ client.db(db.name, bad_username, bad_password, verify=True)
+ assert 'bad username and/or password' in str(err.value)
+
+ # Test connection with missing database
+ with pytest.raises(ServerConnectionError) as err:
+ client.db(bad_db_name, bad_username, bad_password, verify=True)
+ assert 'database not found' in str(err.value)
+
+ # Test connection with invalid host URL
+ client._url = 'http://127.0.0.1:8500'
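+    # Port 8500 is assumed to be unused here, so the connection should fail.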
+ with pytest.raises(ServerConnectionError) as err:
+ client.db(db.name, username, password, verify=True)
+ assert 'bad connection' in str(err.value)
+
+
+def test_client_custom_http_client(db, username, password):
+
+ # Define custom HTTP client which increments the counter on any API call.
+ class MyHTTPClient(DefaultHTTPClient):
+
+ def __init__(self):
+ super(MyHTTPClient, self).__init__()
+ self.counter = 0
+
+ def send_request(self,
+ method,
+ url,
+ headers=None,
+ params=None,
+ data=None,
+ auth=None):
+ self.counter += 1
+ return super(MyHTTPClient, self).send_request(
+ method, url, headers, params, data, auth
+ )
+
+ http_client = MyHTTPClient()
+ client = ArangoClient(
+ protocol='http',
+ host='127.0.0.1',
+ port=8529,
+ http_client=http_client
+ )
+ # Set verify to True to send a test API call on initialization.
+ client.db(db.name, username, password, verify=True)
+ assert http_client.counter == 1
diff --git a/tests/test_cluster.py b/tests/test_cluster.py
deleted file mode 100644
index 84e97654..00000000
--- a/tests/test_cluster.py
+++ /dev/null
@@ -1,57 +0,0 @@
-from __future__ import absolute_import, unicode_literals
-
-import pytest
-from six import string_types
-
-from arango import ArangoClient
-from arango.aql import AQL
-from arango.collections import Collection
-from arango.exceptions import ClusterTestError
-from arango.graph import Graph
-
-from .utils import (
- generate_db_name,
- generate_col_name,
- generate_graph_name
-)
-
-arango_client = ArangoClient()
-db_name = generate_db_name()
-db = arango_client.create_database(db_name)
-col_name = generate_col_name()
-col = db.create_collection(col_name)
-graph_name = generate_graph_name()
-graph = db.create_graph(graph_name)
-vcol_name = generate_col_name()
-graph.create_vertex_collection(vcol_name)
-
-
-def teardown_module(*_):
- arango_client.delete_database(db_name, ignore_missing=True)
-
-
-@pytest.mark.order1
-def test_async_object():
- cluster = db.cluster(
- shard_id=1,
- transaction_id=1,
- timeout=2000,
- sync=False
- )
- assert cluster.type == 'cluster'
- assert 'ArangoDB cluster round-trip test' in repr(cluster)
- assert isinstance(cluster.aql, AQL)
- assert isinstance(cluster.graph(graph_name), Graph)
- assert isinstance(cluster.collection(col_name), Collection)
-
-
-@pytest.mark.order2
-def test_cluster_execute():
- cluster = db.cluster(
- shard_id='foo',
- transaction_id='bar',
- timeout=2000,
- sync=True
- )
- with pytest.raises(ClusterTestError):
- cluster.collection(col_name).checksum()
diff --git a/tests/test_collection.py b/tests/test_collection.py
index 4dbc69be..46f6e32a 100644
--- a/tests/test_collection.py
+++ b/tests/test_collection.py
@@ -1,174 +1,220 @@
from __future__ import absolute_import, unicode_literals
-import pytest
from six import string_types
-from arango import ArangoClient
-from arango.collections import Collection
-from arango.exceptions import *
-
-from .utils import (
- generate_db_name,
- generate_col_name
+from arango.collection import StandardCollection
+from arango.exceptions import (
+ CollectionChecksumError,
+ CollectionConfigureError,
+ CollectionLoadError,
+ CollectionPropertiesError,
+ CollectionRenameError,
+ CollectionRevisionError,
+ CollectionRotateJournalError,
+ CollectionStatisticsError,
+ CollectionTruncateError,
+ CollectionUnloadError,
+ CollectionCreateError,
+ CollectionListError,
+ CollectionDeleteError,
)
+from tests.helpers import assert_raises, extract, generate_col_name
-arango_client = ArangoClient()
-db_name = generate_db_name()
-db = arango_client.create_database(db_name)
-col_name = generate_col_name()
-col = db.create_collection(col_name)
-bad_col_name = generate_col_name()
-bad_col = db.collection(bad_col_name)
-
-
-def teardown_module(*_):
- arango_client.delete_database(db_name, ignore_missing=True)
+def test_collection_attributes(db, col, username):
+ assert col.context in ['default', 'async', 'batch', 'transaction']
+ assert col.username == username
+ assert col.db_name == db.name
+ assert col.name.startswith('test_collection')
+    assert repr(col) == '<StandardCollection {}>'.format(col.name)
-def setup_function(*_):
- col.truncate()
-
-def test_properties():
- assert col.name == col_name
-    assert repr(col) == "<ArangoDB collection '{}'>".format(col_name)
+def test_collection_misc_methods(col, bad_col):
+ # Test get properties
properties = col.properties()
- assert 'id' in properties
- assert properties['status'] in Collection.STATUSES.values()
- assert properties['name'] == col_name
- assert properties['edge'] is False
+ assert properties['name'] == col.name
assert properties['system'] is False
- assert isinstance(properties['sync'], bool)
- assert isinstance(properties['compact'], bool)
- assert isinstance(properties['volatile'], bool)
- assert isinstance(properties['journal_size'], int)
- assert properties['keygen'] in ('autoincrement', 'traditional')
- assert isinstance(properties['user_keys'], bool)
- if properties['key_increment'] is not None:
- assert isinstance(properties['key_increment'], int)
- if properties['key_offset'] is not None :
- assert isinstance(properties['key_offset'], int)
- with pytest.raises(CollectionBadStatusError):
- assert getattr(col, '_status')(10)
- with pytest.raises(CollectionPropertiesError):
- bad_col.properties()
-
-
-def test_configure():
- properties = col.properties()
- old_sync = properties['sync']
- old_journal_size = properties['journal_size']
-
- # Test preconditions
- new_sync = not old_sync
- new_journal_size = old_journal_size + 1
-
- # Test configure
- result = col.configure(sync=new_sync, journal_size=new_journal_size)
- assert result['sync'] == new_sync
- assert result['journal_size'] == new_journal_size
-
- # Test persistence
- new_properties = col.properties()
- assert new_properties['sync'] == new_sync
- assert new_properties['journal_size'] == new_journal_size
-
- # Test missing collection
- with pytest.raises(CollectionConfigureError):
- bad_col.configure(sync=new_sync, journal_size=new_journal_size)
-
-
-def test_rename():
- assert col.name == col_name
- new_name = generate_col_name()
- while new_name == bad_col_name:
- new_name = generate_col_name()
-
- # Test rename collection
- result = col.rename(new_name)
- assert result['name'] == new_name
- assert col.name == new_name
-    assert repr(col) == "<ArangoDB collection '{}'>".format(new_name)
-
- # Try again (the operation should be idempotent)
- result = col.rename(new_name)
- assert result['name'] == new_name
- assert col.name == new_name
-    assert repr(col) == "<ArangoDB collection '{}'>".format(new_name)
- with pytest.raises(CollectionRenameError):
- bad_col.rename(new_name)
+ # Test get properties with bad collection
+ with assert_raises(CollectionPropertiesError) as err:
+ bad_col.properties()
+ assert err.value.error_code == 1228
+
+ # Test configure properties
+ prev_sync = properties['sync']
+ properties = col.configure(
+ sync=not prev_sync,
+ journal_size=10000000
+ )
+ assert properties['name'] == col.name
+ assert properties['system'] is False
+ assert properties['sync'] is not prev_sync
+ # Test configure properties with bad collection
+ with assert_raises(CollectionConfigureError) as err:
+ bad_col.configure(sync=True, journal_size=10000000)
+ assert err.value.error_code == 1228
-def test_statistics():
+ # Test get statistics
stats = col.statistics()
- assert 'alive' in stats
- assert 'compactors' in stats
- assert 'dead' in stats
- assert 'document_refs' in stats
- assert 'journals' in stats
- with pytest.raises(CollectionStatisticsError):
+ assert isinstance(stats, dict)
+ assert 'indexes' in stats
+
+ # Test get statistics with bad collection
+ with assert_raises(CollectionStatisticsError) as err:
bad_col.statistics()
+ assert err.value.error_code == 1228
+ # Test get revision
+ assert isinstance(col.revision(), string_types)
-def test_revision():
- revision = col.revision()
- assert isinstance(revision, string_types)
- with pytest.raises(CollectionRevisionError):
+ # Test get revision with bad collection
+ with assert_raises(CollectionRevisionError) as err:
bad_col.revision()
+ assert err.value.error_code == 1228
+ # Test load into memory
+ assert col.load() is True
-def test_load():
- assert col.load() in {'loaded', 'loading'}
- with pytest.raises(CollectionLoadError):
+ # Test load with bad collection
+ with assert_raises(CollectionLoadError) as err:
bad_col.load()
+ assert err.value.error_code == 1228
+ # Test unload from memory
+ assert col.unload() is True
-def test_unload():
- assert col.unload() in {'unloaded', 'unloading'}
- with pytest.raises(CollectionUnloadError):
+ # Test unload with bad collection
+ with assert_raises(CollectionUnloadError) as err:
bad_col.unload()
-
-
-def test_rotate():
- # No journal should exist with an empty collection
- with pytest.raises(CollectionRotateJournalError):
- col.rotate()
-
-
-def test_checksum():
- # Test checksum for an empty collection
- assert col.checksum(with_rev=True, with_data=False) == 0
- assert col.checksum(with_rev=True, with_data=True) == 0
- assert col.checksum(with_rev=False, with_data=False) == 0
- assert col.checksum(with_rev=False, with_data=True) == 0
-
- # Test checksum for a non-empty collection
- col.insert({'value': 'bar'})
- assert col.checksum(with_rev=True, with_data=False) > 0
- assert col.checksum(with_rev=True, with_data=True) > 0
- assert col.checksum(with_rev=False, with_data=False) > 0
- assert col.checksum(with_rev=False, with_data=True) > 0
-
- # Test checksum for missing collection
- with pytest.raises(CollectionChecksumError):
+ assert err.value.error_code == 1228
+
+ # Test rotate journal
+ try:
+ assert isinstance(col.rotate(), bool)
+ except CollectionRotateJournalError as err:
+ assert err.error_code == 1105
+
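+ # Journal rotation is an MMFiles-engine operation; when there is no
+ # journal to rotate (e.g. a fresh collection, or the RocksDB engine)
+ # the server answers with error 1105 ("no journal"), hence the
+ # try/except above instead of a plain assert.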
+ # Test rotate journal with bad collection
+ with assert_raises(CollectionRotateJournalError) as err:
+ bad_col.rotate()
+ assert err.value.error_code == 1228
+
+ # Test checksum with empty collection
+ assert int(col.checksum(with_rev=True, with_data=False)) == 0
+ assert int(col.checksum(with_rev=True, with_data=True)) == 0
+ assert int(col.checksum(with_rev=False, with_data=False)) == 0
+ assert int(col.checksum(with_rev=False, with_data=True)) == 0
+
+ # Test checksum with non-empty collection
+ col.insert({})
+ assert int(col.checksum(with_rev=True, with_data=False)) > 0
+ assert int(col.checksum(with_rev=True, with_data=True)) > 0
+ assert int(col.checksum(with_rev=False, with_data=False)) > 0
+ assert int(col.checksum(with_rev=False, with_data=True)) > 0
+
+ # Test checksum with bad collection
+ with assert_raises(CollectionChecksumError) as err:
bad_col.checksum()
-
-
-def test_truncate():
- col.insert_many([{'value': 1}, {'value': 2}, {'value': 3}])
+ assert err.value.error_code == 1228
# Test preconditions
- assert len(col) == 3
+ assert len(col) == 1
# Test truncate collection
- result = col.truncate()
- assert 'id' in result
- assert 'name' in result
- assert 'status' in result
- assert 'is_system' in result
+ assert col.truncate() is True
assert len(col) == 0
- # Test truncate missing collection
- with pytest.raises(CollectionTruncateError):
+ # Test truncate with bad collection
+ with assert_raises(CollectionTruncateError) as err:
bad_col.truncate()
+ assert err.value.error_code == 1228
+
+
+def test_collection_management(db, bad_db):
+ # Test create collection
+ col_name = generate_col_name()
+ assert db.has_collection(col_name) is False
+
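+ # Exercise every creation option at once; with key_generator set to
+ # 'autoincrement' and user_keys=False, the server assigns keys that
+ # advance by key_increment (9) starting from key_offset (100).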
+ col = db.create_collection(
+ name=col_name,
+ sync=True,
+ compact=False,
+ journal_size=7774208,
+ system=False,
+ volatile=False,
+ key_generator='autoincrement',
+ user_keys=False,
+ key_increment=9,
+ key_offset=100,
+ edge=True,
+ shard_count=2,
+ shard_fields=['test_attr'],
+ index_bucket_count=10,
+ replication_factor=1
+ )
+ assert db.has_collection(col_name) is True
+
+ properties = col.properties()
+ if col.context != 'transaction':
+ assert 'id' in properties
+ assert properties['name'] == col_name
+ assert properties['sync'] is True
+ assert properties['system'] is False
+ assert properties['key_generator'] == 'autoincrement'
+ assert properties['user_keys'] is False
+ assert properties['key_increment'] == 9
+ assert properties['key_offset'] == 100
+
+ # Test create duplicate collection
+ with assert_raises(CollectionCreateError) as err:
+ db.create_collection(col_name)
+ assert err.value.error_code == 1207
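+ # Error 1207 is ArangoDB's "duplicate name" error.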
+
+ # Test list collections
+ assert all(
+ entry['name'].startswith('test_collection')
+ or entry['name'].startswith('_')
+ for entry in db.collections()
+ )
+
+ # Test list collections with bad database
+ with assert_raises(CollectionListError) as err:
+ bad_db.collections()
+ assert err.value.error_code == 1228
+
+ # Test get collection object
+ test_col = db.collection(col.name)
+ assert isinstance(test_col, StandardCollection)
+ assert test_col.name == col.name
+
+ test_col = db[col.name]
+ assert isinstance(test_col, StandardCollection)
+ assert test_col.name == col.name
+
+ # Test delete collection
+ assert db.delete_collection(col_name, system=False) is True
+ assert col_name not in extract('name', db.collections())
+
+ # Test drop missing collection
+ with assert_raises(CollectionDeleteError) as err:
+ db.delete_collection(col_name)
+ assert err.value.error_code == 1203
+ assert db.delete_collection(col_name, ignore_missing=True) is False
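+ # Error 1203 ("collection not found") is raised for the missing
+ # collection, while ignore_missing=True swallows it and returns False.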
+
+ # Test rename collection
+ new_name = generate_col_name()
+ col = db.create_collection(new_name)
+ assert col.rename(new_name) is True
+ assert col.name == new_name
+ assert repr(col) == '<StandardCollection {}>'.format(new_name)
+
+ # Try again (the operation should be idempotent)
+ assert col.rename(new_name) is True
+ assert col.name == new_name
+ assert repr(col) == '<StandardCollection {}>'.format(new_name)
+
+ # Test rename with bad collection
+ with assert_raises(CollectionRenameError) as err:
+ bad_db.collection(new_name).rename(new_name)
+ assert err.value.error_code == 1228
diff --git a/tests/test_cursor.py b/tests/test_cursor.py
index bd2d0eb6..55c31f51 100644
--- a/tests/test_cursor.py
+++ b/tests/test_cursor.py
@@ -2,311 +2,275 @@
import pytest
-from arango import ArangoClient
from arango.exceptions import (
+ CursorCloseError,
+ CursorEmptyError,
CursorNextError,
- CursorCloseError
+ CursorStateError,
)
+from tests.helpers import clean_doc
-from .utils import (
- generate_db_name,
- generate_col_name,
- clean_keys
-)
-
-arango_client = ArangoClient()
-db_name = generate_db_name()
-db = arango_client.create_database(db_name)
-col_name = generate_col_name()
-col = db.create_collection(col_name)
-
-cursor = None
-cursor_id = None
-doc1 = {'_key': '1'}
-doc2 = {'_key': '2'}
-doc3 = {'_key': '3'}
-doc4 = {'_key': '4'}
-
-
-def teardown_module(*_):
- arango_client.delete_database(db_name, ignore_missing=True)
+@pytest.fixture(autouse=True)
+def setup_collection(col, docs):
+ col.import_bulk(docs)
-@pytest.mark.order1
-def test_read_cursor_init():
- global cursor, cursor_id
- col.import_bulk([doc1, doc2, doc3, doc4])
+def test_cursor_from_execute_query(db, col, docs):
cursor = db.aql.execute(
- 'FOR d IN {} RETURN d'.format(col_name),
+ 'FOR d IN {} SORT d._key RETURN d'.format(col.name),
count=True,
batch_size=2,
ttl=1000,
- optimizer_rules=['+all']
+ optimizer_rules=['+all'],
+ profile=True
)
cursor_id = cursor.id
- assert 'ArangoDB cursor' in repr(cursor)
+ assert 'Cursor' in repr(cursor)
+ assert cursor.type == 'cursor'
assert cursor.has_more() is True
assert cursor.cached() is False
- assert cursor.statistics()['modified'] == 0
- assert cursor.statistics()['filtered'] == 0
- assert cursor.statistics()['ignored'] == 0
- assert cursor.statistics()['scanned_full'] == 4
- assert cursor.statistics()['scanned_index'] == 0
assert cursor.warnings() == []
- assert cursor.count() == 4
- assert clean_keys(cursor.batch()) == [doc1, doc2]
- assert isinstance(cursor.statistics()['execution_time'], (int, float))
+ assert cursor.count() == len(cursor) == 6
+ assert clean_doc(cursor.batch()) == docs[:2]
+
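+ # With count=True the cursor knows the full result size (6) up front,
+ # while batch() only holds the first batch_size (2) documents; the
+ # rest stay on the server until fetched.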
+ statistics = cursor.statistics()
+ assert statistics['modified'] == 0
+ assert statistics['filtered'] == 0
+ assert statistics['ignored'] == 0
+ assert statistics['scanned_full'] == 6
+ assert statistics['scanned_index'] == 0
+ assert statistics['execution_time'] > 0
+ assert statistics['http_requests'] == 0
+ assert cursor.warnings() == []
+ profile = cursor.profile()
+ assert profile['initializing'] > 0
+ assert profile['parsing'] > 0
-@pytest.mark.order2
-def test_read_cursor_first():
- assert clean_keys(cursor.next()) == doc1
+ assert clean_doc(cursor.next()) == docs[0]
assert cursor.id == cursor_id
assert cursor.has_more() is True
assert cursor.cached() is False
- assert cursor.statistics()['modified'] == 0
- assert cursor.statistics()['filtered'] == 0
- assert cursor.statistics()['ignored'] == 0
- assert cursor.statistics()['scanned_full'] == 4
- assert cursor.statistics()['scanned_index'] == 0
+ assert cursor.statistics() == statistics
+ assert cursor.profile() == profile
assert cursor.warnings() == []
- assert cursor.count() == 4
- assert clean_keys(cursor.batch()) == [doc2]
- assert isinstance(cursor.statistics()['execution_time'], (int, float))
-
+ assert cursor.count() == len(cursor) == 6
+ assert clean_doc(cursor.batch()) == [docs[1]]
-@pytest.mark.order3
-def test_read_cursor_second():
- clean_keys(cursor.next()) == doc2
+ assert clean_doc(cursor.next()) == docs[1]
assert cursor.id == cursor_id
assert cursor.has_more() is True
assert cursor.cached() is False
- assert cursor.statistics()['modified'] == 0
- assert cursor.statistics()['filtered'] == 0
- assert cursor.statistics()['ignored'] == 0
- assert cursor.statistics()['scanned_full'] == 4
- assert cursor.statistics()['scanned_index'] == 0
+ assert cursor.statistics() == statistics
+ assert cursor.profile() == profile
assert cursor.warnings() == []
- assert cursor.count() == 4
- assert clean_keys(cursor.batch()) == []
- assert isinstance(cursor.statistics()['execution_time'], (int, float))
+ assert cursor.count() == len(cursor) == 6
+ assert clean_doc(cursor.batch()) == []
-
-@pytest.mark.order4
-def test_read_cursor_third():
- clean_keys(cursor.next()) == doc3
- assert cursor.id is None
- assert cursor.has_more() is False
+ assert clean_doc(cursor.next()) == docs[2]
+ assert cursor.id == cursor_id
+ assert cursor.has_more() is True
assert cursor.cached() is False
- assert cursor.statistics()['modified'] == 0
- assert cursor.statistics()['filtered'] == 0
- assert cursor.statistics()['ignored'] == 0
- assert cursor.statistics()['scanned_full'] == 4
- assert cursor.statistics()['scanned_index'] == 0
+ assert cursor.statistics() == statistics
+ assert cursor.profile() == profile
assert cursor.warnings() == []
- assert cursor.count() == 4
- assert clean_keys(cursor.batch()) == [doc3]
- assert isinstance(cursor.statistics()['execution_time'], (int, float))
-
+ assert cursor.count() == len(cursor) == 6
+ assert clean_doc(cursor.batch()) == [docs[3]]
-@pytest.mark.order5
-def test_read_cursor_finish():
- clean_keys(cursor.next()) == doc4
- assert cursor.id is None
+ assert clean_doc(cursor.next()) == docs[3]
+ assert clean_doc(cursor.next()) == docs[4]
+ assert clean_doc(cursor.next()) == docs[5]
+ assert cursor.id == cursor_id
assert cursor.has_more() is False
- assert cursor.cached() is False
- assert cursor.statistics()['modified'] == 0
- assert cursor.statistics()['filtered'] == 0
- assert cursor.statistics()['ignored'] == 0
- assert cursor.statistics()['scanned_full'] == 4
- assert cursor.statistics()['scanned_index'] == 0
+ assert cursor.statistics() == statistics
+ assert cursor.profile() == profile
assert cursor.warnings() == []
- assert cursor.count() == 4
- assert clean_keys(cursor.batch()) == []
- assert isinstance(cursor.statistics()['execution_time'], (int, float))
+ assert cursor.count() == len(cursor) == 6
+ assert clean_doc(cursor.batch()) == []
with pytest.raises(StopIteration):
cursor.next()
assert cursor.close(ignore_missing=True) is False
- incorrect_cursor_data = {'id': 'invalid', 'hasMore': True, 'result': []}
- setattr(cursor, '_data', incorrect_cursor_data)
- with pytest.raises(CursorCloseError):
- cursor.close(ignore_missing=False)
- with pytest.raises(CursorNextError):
- cursor.next()
-
-
-@pytest.mark.order6
-def test_read_cursor_early_finish():
- global cursor, cursor_id
- col.truncate()
- col.import_bulk([doc1, doc2, doc3, doc4])
- cursor = db.aql.execute(
- 'FOR d IN {} RETURN d'.format(col_name),
- count=True,
- batch_size=2,
- ttl=1000,
- optimizer_rules=['+all']
- )
- assert cursor.close() is True
- with pytest.raises(CursorCloseError):
- cursor.close(ignore_missing=False)
-
- assert clean_keys(cursor.batch()) == [doc1, doc2]
-
-
-@pytest.mark.order7
-def test_write_cursor_init():
- global cursor, cursor_id
- col.truncate()
- col.import_bulk([doc1, doc2, doc3])
+def test_cursor_write_query(db, col, docs):
cursor = db.aql.execute(
'''
FOR d IN {col} FILTER d._key == @first OR d._key == @second
UPDATE {{_key: d._key, _val: @val }} IN {col}
RETURN NEW
- '''.format(col=col_name),
+ '''.format(col=col.name),
bind_vars={'first': '1', 'second': '2', 'val': 42},
count=True,
batch_size=1,
ttl=1000,
- optimizer_rules=['+all']
+ optimizer_rules=['+all'],
+ profile=True
)
cursor_id = cursor.id
+ assert 'Cursor' in repr(cursor)
assert cursor.has_more() is True
assert cursor.cached() is False
- assert cursor.statistics()['modified'] == 2
- assert cursor.statistics()['filtered'] == 0
- assert cursor.statistics()['ignored'] == 0
- assert cursor.statistics()['scanned_full'] == 0
- assert cursor.statistics()['scanned_index'] == 2
assert cursor.warnings() == []
- assert cursor.count() == 2
- assert clean_keys(cursor.batch()) == [doc1]
- assert isinstance(cursor.statistics()['execution_time'], (int, float))
+ assert cursor.count() == len(cursor) == 2
+ assert clean_doc(cursor.batch()) == [docs[0]]
+
+ statistics = cursor.statistics()
+ assert statistics['modified'] == 2
+ assert statistics['filtered'] == 0
+ assert statistics['ignored'] == 0
+ assert statistics['scanned_full'] == 0
+ assert statistics['scanned_index'] == 2
+ assert statistics['execution_time'] > 0
+ assert statistics['http_requests'] == 0
+ assert cursor.warnings() == []
+ profile = cursor.profile()
+ assert profile['initializing'] > 0
+ assert profile['parsing'] > 0
-@pytest.mark.order8
-def test_write_cursor_first():
- assert clean_keys(cursor.next()) == doc1
+ assert clean_doc(cursor.next()) == docs[0]
assert cursor.id == cursor_id
assert cursor.has_more() is True
assert cursor.cached() is False
- assert cursor.statistics()['modified'] == 2
- assert cursor.statistics()['filtered'] == 0
- assert cursor.statistics()['ignored'] == 0
- assert cursor.statistics()['scanned_full'] == 0
- assert cursor.statistics()['scanned_index'] == 2
+ assert cursor.statistics() == statistics
+ assert cursor.profile() == profile
assert cursor.warnings() == []
- assert cursor.count() == 2
- assert clean_keys(cursor.batch()) == []
- assert isinstance(cursor.statistics()['execution_time'], (int, float))
+ assert cursor.count() == len(cursor) == 2
+ assert clean_doc(cursor.batch()) == []
-
-@pytest.mark.order9
-def test_write_cursor_second():
- clean_keys(cursor.next()) == doc2
- assert cursor.id is None
+ assert clean_doc(cursor.next()) == docs[1]
+ assert cursor.id == cursor_id
assert cursor.has_more() is False
assert cursor.cached() is False
- assert cursor.statistics()['modified'] == 2
- assert cursor.statistics()['filtered'] == 0
- assert cursor.statistics()['ignored'] == 0
- assert cursor.statistics()['scanned_full'] == 0
- assert cursor.statistics()['scanned_index'] == 2
+ assert cursor.statistics() == statistics
+ assert cursor.profile() == profile
assert cursor.warnings() == []
- assert cursor.count() == 2
- assert clean_keys(cursor.batch()) == []
- assert isinstance(cursor.statistics()['execution_time'], (int, float))
- with pytest.raises(StopIteration):
- cursor.next()
- assert cursor.close(ignore_missing=True) is False
+ assert cursor.count() == len(cursor) == 2
+ assert clean_doc(cursor.batch()) == []
- incorrect_cursor_data = {'id': 'invalid', 'hasMore': True, 'result': []}
- setattr(cursor, '_data', incorrect_cursor_data)
- with pytest.raises(CursorCloseError):
+ with pytest.raises(CursorCloseError) as err:
cursor.close(ignore_missing=False)
- with pytest.raises(CursorNextError):
- cursor.next()
+ assert err.value.error_code == 1600
+ assert cursor.close(ignore_missing=True) is False
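+ # Error 1600 ("cursor not found"): the server drops a cursor once it
+ # is exhausted, so an explicit close afterwards can only succeed with
+ # ignore_missing=True.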
-@pytest.mark.order10
-def test_write_cursor_early_finish():
- global cursor, cursor_id
- col.truncate()
- col.import_bulk([doc1, doc2, doc3])
+def test_cursor_invalid_id(db, col):
cursor = db.aql.execute(
- '''
- FOR d IN {col} FILTER d._key == @first OR d._key == @second
- UPDATE {{_key: d._key, _val: @val }} IN {col}
- RETURN NEW
- '''.format(col=col_name),
- bind_vars={'first': '1', 'second': '2', 'val': 42},
+ 'FOR d IN {} SORT d._key RETURN d'.format(col.name),
count=True,
- batch_size=1,
+ batch_size=2,
ttl=1000,
- optimizer_rules=['+all']
+ optimizer_rules=['+all'],
+ profile=True
)
- assert cursor.close() is True
- with pytest.raises(CursorCloseError):
+ # Set the cursor ID to "invalid" and assert errors
+ setattr(cursor, '_id', 'invalid')
+
+ with pytest.raises(CursorNextError) as err:
+ list(cursor)
+ assert err.value.error_code == 1600
+
+ with pytest.raises(CursorCloseError) as err:
cursor.close(ignore_missing=False)
+ assert err.value.error_code == 1600
assert cursor.close(ignore_missing=True) is False
- col.truncate()
- col.import_bulk([doc1, doc2, doc3, doc4])
+ # Set the cursor ID to None and assert errors
+ setattr(cursor, '_id', None)
- cursor = db.aql.execute(
- 'FOR d IN {} RETURN d'.format(col_name),
- count=False,
- batch_size=1,
- ttl=1000,
- optimizer_rules=['+all']
- )
+ with pytest.raises(CursorStateError) as err:
+ cursor.next()
+ assert err.value.message == 'cursor ID not set'
+ with pytest.raises(CursorStateError) as err:
+ cursor.close()
+ assert err.value.message == 'cursor ID not set'
-@pytest.mark.order11
-def test_cursor_context_manager():
- global cursor, cursor_id
+ with pytest.raises(CursorStateError) as err:
+ cursor.fetch()
+ assert err.value.message == 'cursor ID not set'
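+ # CursorStateError is raised client-side (no request is sent), since
+ # next/close/fetch all need a cursor ID to talk to the server.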
- col.truncate()
- col.import_bulk([doc1, doc2, doc3])
- with db.aql.execute(
- 'FOR d IN {} RETURN d'.format(col_name),
- count=False,
+def test_cursor_premature_close(db, col, docs):
+ cursor = db.aql.execute(
+ 'FOR d IN {} SORT d._key RETURN d'.format(col.name),
+ count=True,
batch_size=2,
ttl=1000,
- optimizer_rules=['+all']
- ) as cursor:
- assert clean_keys(cursor.next()) == doc1
- with pytest.raises(CursorCloseError):
+ optimizer_rules=['+all'],
+ profile=True
+ )
+ assert clean_doc(cursor.batch()) == docs[:2]
+ assert cursor.close() is True
+ with pytest.raises(CursorCloseError) as err:
cursor.close(ignore_missing=False)
+ assert err.value.error_code == 1600
+ assert cursor.close(ignore_missing=True) is False
+
+def test_cursor_context_manager(db, col, docs):
with db.aql.execute(
- 'FOR d IN {} RETURN d'.format(col_name),
- count=False,
- batch_size=2,
- ttl=1000,
- optimizer_rules=['+all']
+ 'FOR d IN {} SORT d._key RETURN d'.format(col.name),
+ count=True,
+ batch_size=2,
+ ttl=1000,
+ optimizer_rules=['+all'],
+ profile=True
) as cursor:
- assert clean_keys(cursor.__next__()) == doc1
- with pytest.raises(CursorCloseError):
+ assert clean_doc(cursor.next()) == docs[0]
+
+ with pytest.raises(CursorCloseError) as err:
cursor.close(ignore_missing=False)
+ assert err.value.error_code == 1600
assert cursor.close(ignore_missing=True) is False
-@pytest.mark.order12
-def test_cursor_repr_no_id():
- col.truncate()
- col.import_bulk([doc1, doc2, doc3, doc4])
+def test_cursor_manual_fetch_and_pop(db, col, docs):
cursor = db.aql.execute(
- 'FOR d IN {} RETURN d'.format(col_name),
+ 'FOR d IN {} SORT d._key RETURN d'.format(col.name),
count=True,
+ batch_size=1,
+ ttl=1000,
+ optimizer_rules=['+all'],
+ profile=True
+ )
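+ # With batch_size=1 each fetch() pulls one more document from the
+ # server and appends it to the local batch, so the batch grows by one
+ # per iteration below.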
+ for size in range(2, 6):
+ result = cursor.fetch()
+ assert result['id'] == cursor.id
+ assert result['count'] == len(docs)
+ assert result['cached'] == cursor.cached()
+ assert result['has_more'] == cursor.has_more()
+ assert result['profile'] == cursor.profile()
+ assert result['warnings'] == cursor.warnings()
+ assert result['statistics'] == cursor.statistics()
+ assert len(result['batch']) > 0
+ assert cursor.count() == len(docs)
+ assert cursor.has_more()
+ assert len(cursor.batch()) == size
+
+ cursor.fetch()
+ assert len(cursor.batch()) == 6
+ assert not cursor.has_more()
+
+ while not cursor.empty():
+ cursor.pop()
+ assert len(cursor.batch()) == 0
+
+ with pytest.raises(CursorEmptyError) as err:
+ cursor.pop()
+ assert err.value.message == 'current batch is empty'
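+ # pop() consumes from the local batch without a server round trip;
+ # once the batch is drained it raises CursorEmptyError client-side.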
+
+
+def test_cursor_no_count(db, col):
+ cursor = db.aql.execute(
+ 'FOR d IN {} SORT d._key RETURN d'.format(col.name),
+ count=False,
batch_size=2,
ttl=1000,
- optimizer_rules=['+all']
+ optimizer_rules=['+all'],
+ profile=True
)
- getattr(cursor, '_data')['id'] = None
- assert repr(cursor) == '<ArangoDB cursor>'
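+ # Without count=True the server never reports a total, so count()
+ # stays None even as batches are fetched.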
+ while cursor.has_more():
+ assert cursor.count() is None
+ assert cursor.fetch()
diff --git a/tests/test_database.py b/tests/test_database.py
index b43fef85..5448e74e 100644
--- a/tests/test_database.py
+++ b/tests/test_database.py
@@ -2,294 +2,96 @@
from datetime import datetime
-import pytest
from six import string_types
-from arango import ArangoClient
-from arango.collections import Collection
-from arango.graph import Graph
-from arango.exceptions import *
-
-from .utils import (
- generate_db_name,
- generate_col_name,
- generate_graph_name,
- arango_version
+from arango.exceptions import (
+ DatabaseCreateError,
+ DatabaseDeleteError,
+ DatabaseListError,
+ DatabasePropertiesError,
+ ServerDetailsError,
+ ServerEchoError,
+ ServerEndpointsError,
+ ServerLogLevelError,
+ ServerLogLevelSetError,
+ ServerReadLogError,
+ ServerReloadRoutingError,
+ ServerRequiredDBVersionError,
+ ServerRoleError,
+ ServerStatisticsError,
+ ServerTimeError,
+ ServerVersionError,
+ ServerEngineError
)
+from tests.helpers import assert_raises, generate_db_name
-arango_client = ArangoClient()
-db_name = generate_db_name()
-db = arango_client.create_database(db_name)
-bad_db_name = generate_db_name()
-bad_db = arango_client.db(bad_db_name)
-col_name_1 = generate_col_name()
-col_name_2 = ''
-db.create_collection(col_name_1)
-graph_name = generate_graph_name()
-db.create_graph(graph_name)
-
-
-def teardown_module(*_):
- arango_client.delete_database(db_name, ignore_missing=True)
+def test_database_attributes(db, username):
+ assert db.context in ['default', 'async', 'batch', 'transaction']
+ assert db.username == username
+ assert db.db_name == db.name
+ assert db.name.startswith('test_database')
+ assert repr(db) == '<StandardDatabase {}>'.format(db.name)
-@pytest.mark.order1
-def test_properties():
- assert db.name == db_name
- assert repr(db) == '<ArangoDB database "{}">'.format(db_name)
+def test_database_misc_methods(db, bad_db):
+ # Test get properties
properties = db.properties()
assert 'id' in properties
assert 'path' in properties
+ assert properties['name'] == db.name
assert properties['system'] is False
- assert properties['name'] == db_name
- assert 'ArangoDB connection' in repr(db.connection)
- with pytest.raises(DatabasePropertiesError):
+ # Test get properties with bad database
+ with assert_raises(DatabasePropertiesError) as err:
bad_db.properties()
+ assert err.value.error_code == 1228
+ # Test get server version
+ assert isinstance(db.version(), string_types)
-@pytest.mark.order2
-def test_list_collections():
- assert all(
- col['name'] == col_name_1 or col['name'].startswith('_')
- for col in db.collections()
- )
-
- with pytest.raises(CollectionListError):
- bad_db.collections()
-
-
-@pytest.mark.order3
-def test_get_collection():
- for col in [db.collection(col_name_1), db[col_name_1]]:
- assert isinstance(col, Collection)
- assert col.name == col_name_1
-
-
-@pytest.mark.order4
-def test_create_collection():
- global col_name_2
-
- # Test create duplicate collection
- with pytest.raises(CollectionCreateError):
- db.create_collection(col_name_1)
-
- # Test create collection with parameters
- col_name_2 = generate_col_name()
- col = db.create_collection(
- name=col_name_2,
- sync=True,
- compact=False,
- journal_size=7774208,
- system=False,
- volatile=False,
- key_generator="autoincrement",
- user_keys=False,
- key_increment=9,
- key_offset=100,
- edge=True,
- shard_count=2,
- shard_fields=["test_attr"],
- index_bucket_count=10,
- replication_factor=1
- )
- properties = col.properties()
- assert 'id' in properties
- assert properties['name'] == col_name_2
- assert properties['sync'] is True
- assert properties['compact'] is False
- assert properties['journal_size'] == 7774208
- assert properties['system'] is False
- assert properties['volatile'] is False
- assert properties['edge'] is True
- assert properties['keygen'] == 'autoincrement'
- assert properties['user_keys'] is False
- assert properties['key_increment'] == 9
- assert properties['key_offset'] == 100
-
-
-@pytest.mark.order5
-def test_create_system_collection():
- major, minor = arango_version(arango_client)
- if major == 3 and minor >= 1:
-
- system_col_name = '_' + col_name_1
- col = db.create_collection(
- name=system_col_name,
- system=True,
- )
- properties = col.properties()
- assert properties['system'] is True
- assert system_col_name in [c['name'] for c in db.collections()]
- assert db.collection(system_col_name).properties()['system'] is True
-
- with pytest.raises(CollectionDeleteError):
- db.delete_collection(system_col_name)
- assert system_col_name in [c['name'] for c in db.collections()]
-
- db.delete_collection(system_col_name, system=True)
- assert system_col_name not in [c['name'] for c in db.collections()]
-
-
-@pytest.mark.order6
-def test_delete_collection():
- # Test drop collection
- result = db.delete_collection(col_name_2)
- assert result is True
- assert col_name_2 not in set(c['name'] for c in db.collections())
-
- # Test drop missing collection
- with pytest.raises(CollectionDeleteError):
- db.delete_collection(col_name_2)
-
- # Test drop missing collection (ignore_missing)
- result = db.delete_collection(col_name_2, ignore_missing=True)
- assert result is False
-
-
-@pytest.mark.order7
-def test_list_graphs():
- graphs = db.graphs()
- assert len(graphs) == 1
-
- graph = graphs[0]
- assert graph['name'] == graph_name
- assert graph['edge_definitions'] == []
- assert graph['orphan_collections'] == []
- assert 'revision' in graph
-
- with pytest.raises(GraphListError):
- bad_db.graphs()
-
-
-@pytest.mark.order8
-def test_get_graph():
- graph = db.graph(graph_name)
- assert isinstance(graph, Graph)
- assert graph.name == graph_name
-
-
-@pytest.mark.order9
-def test_create_graph():
- # Test create duplicate graph
- with pytest.raises(GraphCreateError):
- db.create_graph(graph_name)
-
- new_graph_name = generate_graph_name()
- db.create_graph(new_graph_name)
- assert new_graph_name in [g['name'] for g in db.graphs()]
-
-
-@pytest.mark.order10
-def test_delete_graph():
- # Test delete graph from the last test
- result = db.delete_graph(graph_name)
- assert result is True
- assert graph_name not in db.graphs()
-
- # Test delete missing graph
- with pytest.raises(GraphDeleteError):
- db.delete_graph(graph_name)
-
- # Test delete missing graph (ignore_missing)
- result = db.delete_graph(graph_name, ignore_missing=True)
- assert result is False
-
- major, minor = arango_version(arango_client)
-
- if major == 3 and minor >= 1:
- # Create a graph with vertex and edge collections and delete them all
- new_graph_name = generate_graph_name()
- graph = db.create_graph(new_graph_name)
- vcol_name_1 = generate_col_name()
- graph.create_vertex_collection(vcol_name_1)
- vcol_name_2 = generate_col_name()
- graph.create_vertex_collection(vcol_name_2)
- ecol_name = generate_col_name()
- graph.create_edge_definition(
- name=ecol_name,
- from_collections=[vcol_name_1],
- to_collections=[vcol_name_2]
- )
- collections = set(col['name'] for col in db.collections())
- assert vcol_name_1 in collections
- assert vcol_name_2 in collections
- assert ecol_name in collections
-
- db.delete_graph(new_graph_name)
- collections = set(col['name'] for col in db.collections())
- assert vcol_name_1 in collections
- assert vcol_name_2 in collections
- assert ecol_name in collections
-
- graph = db.create_graph(new_graph_name)
- graph.create_edge_definition(
- name=ecol_name,
- from_collections=[vcol_name_1],
- to_collections=[vcol_name_2]
- )
- db.delete_graph(new_graph_name, drop_collections=True)
- collections = set(col['name'] for col in db.collections())
- assert vcol_name_1 not in collections
- assert vcol_name_2 not in collections
- assert ecol_name not in collections
-
-
-@pytest.mark.order11
-def test_verify():
- assert db.verify() is True
- with pytest.raises(ServerConnectionError):
- bad_db.verify()
-
-
-@pytest.mark.order12
-def test_version():
- version = db.version()
- assert isinstance(version, string_types)
-
- with pytest.raises(ServerVersionError):
+ # Test get server version with bad database
+ with assert_raises(ServerVersionError) as err:
bad_db.version()
+ assert err.value.error_code == 1228
-
-@pytest.mark.order13
-def test_details():
+ # Test get server details
details = db.details()
assert 'architecture' in details
assert 'server-version' in details
- with pytest.raises(ServerDetailsError):
+ # Test get server details with bad database
+ with assert_raises(ServerDetailsError) as err:
bad_db.details()
+ assert err.value.error_code == 1228
-
-@pytest.mark.order14
-def test_required_db_version():
+ # Test get server required database version
version = db.required_db_version()
assert isinstance(version, string_types)
- with pytest.raises(ServerRequiredDBVersionError):
+ # Test get server target version with bad database
+ with assert_raises(ServerRequiredDBVersionError):
bad_db.required_db_version()
-
-@pytest.mark.order15
-def test_statistics():
+ # Test get server statistics
statistics = db.statistics(description=False)
assert isinstance(statistics, dict)
assert 'time' in statistics
assert 'system' in statistics
assert 'server' in statistics
+ # Test get server statistics with description
description = db.statistics(description=True)
assert isinstance(description, dict)
assert 'figures' in description
assert 'groups' in description
- with pytest.raises(ServerStatisticsError):
+ # Test get server statistics with bad database
+ with assert_raises(ServerStatisticsError) as err:
bad_db.statistics()
+ assert err.value.error_code == 1228
-
-@pytest.mark.order16
-def test_role():
+ # Test get server role
assert db.role() in {
'SINGLE',
'COORDINATOR',
@@ -297,62 +99,40 @@ def test_role():
'SECONDARY',
'UNDEFINED'
}
- with pytest.raises(ServerRoleError):
- bad_db.role()
+ # Test get server role with bad database
+ with assert_raises(ServerRoleError) as err:
+ bad_db.role()
+ assert err.value.error_code == 1228
-@pytest.mark.order17
-def test_time():
- system_time = db.time()
- assert isinstance(system_time, datetime)
+ # Test get server time
+ assert isinstance(db.time(), datetime)
- with pytest.raises(ServerTimeError):
+ # Test get server time with bad database
+ with assert_raises(ServerTimeError) as err:
bad_db.time()
+ assert err.value.error_code == 1228
-
-@pytest.mark.order18
-def test_echo():
+ # Test echo (get last request)
last_request = db.echo()
assert 'protocol' in last_request
assert 'user' in last_request
assert 'requestType' in last_request
assert 'rawRequestBody' in last_request
- with pytest.raises(ServerEchoError):
+ # Test echo with bad database
+ with assert_raises(ServerEchoError) as err:
bad_db.echo()
+ assert err.value.error_code == 1228
-
-@pytest.mark.order19
-def test_sleep():
- assert db.sleep(0) == 0
-
- with pytest.raises(ServerSleepError):
- bad_db.sleep(0)
-
-
-@pytest.mark.order20
-def test_execute():
- major, minor = arango_version(arango_client)
-
- # TODO ArangoDB 3.2 seems to be missing this API endpoint
- if not (major == 3 and minor == 2):
- assert db.execute('return 1') == '1'
- assert db.execute('return "test"') == '"test"'
- with pytest.raises(ServerExecuteError) as err:
- db.execute('return invalid')
- assert 'Internal Server Error' in err.value.message
-
-
-@pytest.mark.order21
-def test_log():
- # Test read_log with default arguments
+ # Test read_log with default parameters
log = db.read_log(upto='fatal')
assert 'lid' in log
assert 'level' in log
assert 'text' in log
assert 'total_amount' in log
- # Test read_log with specific arguments
+ # Test read_log with specific parameters
log = db.read_log(
level='error',
start=0,
@@ -366,49 +146,100 @@ def test_log():
assert 'text' in log
assert 'total_amount' in log
- # Test read_log with incorrect auth
- with pytest.raises(ServerReadLogError):
+ # Test read_log with bad database
+ with assert_raises(ServerReadLogError) as err:
bad_db.read_log()
+ assert err.value.error_code == 1228
+ # Test reload routing
+ assert isinstance(db.reload_routing(), bool)
-@pytest.mark.order22
-def test_reload_routing():
- result = db.reload_routing()
- assert isinstance(result, bool)
-
- with pytest.raises(ServerReloadRoutingError):
+ # Test reload routing with bad database
+ with assert_raises(ServerReloadRoutingError) as err:
bad_db.reload_routing()
+ assert err.value.error_code == 1228
+ # Test get log levels
+ assert isinstance(db.log_levels(), dict)
-@pytest.mark.order23
-def test_log_levels():
- major, minor = arango_version(arango_client)
- if major == 3 and minor >= 1:
+ # Test get log levels with bad database
+ with assert_raises(ServerLogLevelError) as err:
+ bad_db.log_levels()
+ assert err.value.error_code == 1228
- result = db.log_levels()
- assert isinstance(result, dict)
-
- with pytest.raises(ServerLogLevelError):
- bad_db.log_levels()
-
-
-@pytest.mark.order24
-def test_set_log_levels():
- major, minor = arango_version(arango_client)
- if major == 3 and minor >= 1:
-
- new_levels = {
- 'agency': 'DEBUG',
- 'collector': 'INFO',
- 'threads': 'WARNING'
- }
- result = db.set_log_levels(**new_levels)
-
- for key, value in new_levels.items():
- assert result[key] == value
-
- for key, value in db.log_levels().items():
- assert result[key] == value
-
- with pytest.raises(ServerLogLevelSetError):
- bad_db.set_log_levels(**new_levels)
+ # Test set log levels
+ new_levels = {
+ 'agency': 'DEBUG',
+ 'collector': 'INFO',
+ 'threads': 'WARNING'
+ }
+ result = db.set_log_levels(**new_levels)
+ for key, value in new_levels.items():
+ assert result[key] == value
+ for key, value in db.log_levels().items():
+ assert result[key] == value
+
+ # Test set log levels with bad database
+ with assert_raises(ServerLogLevelSetError):
+ bad_db.set_log_levels(**new_levels)
+
+ # Test get server endpoints
+ with assert_raises(ServerEndpointsError) as err:
+ db.endpoints()
+ assert err.value.error_code in [11]
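+ # Listing endpoints is restricted to the _system database, so the
+ # regular test database gets error 11 ("forbidden") here.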
+
+ # Test get server endpoints with bad database
+ with assert_raises(ServerEndpointsError) as err:
+ bad_db.endpoints()
+ assert err.value.error_code == 1228
+
+ # Test get storage engine
+ engine = db.engine()
+ assert engine['name'] in ['mmfiles', 'rocksdb']
+ assert 'supports' in engine
+
+ # Test get storage engine with bad database
+ with assert_raises(ServerEngineError) as err:
+ bad_db.engine()
+ assert err.value.error_code == 1228
+
+
+def test_database_management(db, sys_db, bad_db):
+ # Test list databases
+ result = sys_db.databases()
+ assert '_system' in result
+
+ # Test list databases with bad database
+ with assert_raises(DatabaseListError):
+ bad_db.databases()
+
+ # Test create database
+ db_name = generate_db_name()
+ assert sys_db.has_database(db_name) is False
+ assert sys_db.create_database(db_name) is True
+ assert sys_db.has_database(db_name) is True
+
+ # Test create duplicate database
+ with assert_raises(DatabaseCreateError) as err:
+ sys_db.create_database(db_name)
+ assert err.value.error_code == 1207
+
+ # Test create database without permissions
+ with assert_raises(DatabaseCreateError) as err:
+ db.create_database(db_name)
+ assert err.value.error_code == 1230
+
+ # Test delete database without permissions
+ with assert_raises(DatabaseDeleteError) as err:
+ db.delete_database(db_name)
+ assert err.value.error_code == 1230
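+ # Error 1230 ("use system database"): databases can only be created
+ # or dropped through the _system database.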
+
+ # Test delete database
+ assert sys_db.delete_database(db_name) is True
+ assert db_name not in sys_db.databases()
+
+ # Test delete missing database
+ with assert_raises(DatabaseDeleteError) as err:
+ sys_db.delete_database(db_name)
+ assert err.value.error_code == 1228
+ assert sys_db.delete_database(db_name, ignore_missing=True) is False
diff --git a/tests/test_document.py b/tests/test_document.py
index 98ef8928..47eda478 100644
--- a/tests/test_document.py
+++ b/tests/test_document.py
@@ -1,87 +1,78 @@
from __future__ import absolute_import, unicode_literals
-import pytest
from six import string_types
-from arango import ArangoClient
-from arango.exceptions import *
-
-from .utils import (
- generate_db_name,
- generate_col_name,
- clean_keys,
- ordered
+from arango.exceptions import (
+ DocumentCountError,
+ DocumentDeleteError,
+ DocumentGetError,
+ DocumentInError,
+ DocumentInsertError,
+ DocumentReplaceError,
+ DocumentRevisionError,
+ DocumentUpdateError,
+ DocumentKeysError,
+ DocumentIDsError,
+ DocumentParseError,
+)
+from tests.helpers import (
+ assert_raises,
+ clean_doc,
+ extract,
+ generate_doc_key,
+ generate_col_name
)
-arango_client = ArangoClient()
-db_name = generate_db_name()
-db = arango_client.create_database(db_name)
-col_name = generate_col_name()
-col = db.create_collection(col_name)
-edge_col_name = generate_col_name()
-edge_col = db.create_collection(edge_col_name, edge=True)
-geo_index = col.add_geo_index(['coordinates'])
-bad_db = arango_client.database(db_name, password='invalid')
-bad_col_name = generate_col_name()
-bad_col = db.collection(bad_col_name)
-
-doc1 = {'_key': '1', 'val': 100, 'text': 'foo', 'coordinates': [1, 1]}
-doc2 = {'_key': '2', 'val': 100, 'text': 'bar', 'coordinates': [2, 2]}
-doc3 = {'_key': '3', 'val': 100, 'text': 'baz', 'coordinates': [3, 3]}
-doc4 = {'_key': '4', 'val': 200, 'text': 'foo', 'coordinates': [4, 4]}
-doc5 = {'_key': '5', 'val': 300, 'text': 'foo', 'coordinates': [5, 5]}
-test_docs = [doc1, doc2, doc3, doc4, doc5]
-test_doc_keys = [d['_key'] for d in test_docs]
-
-edge1 = {'_key': '1', '_to': '1', '_from': '2'}
-edge2 = {'_key': '2', '_to': '2', '_from': '3'}
-edge3 = {'_key': '3', '_to': '3', '_from': '4'}
-edge4 = {'_key': '4', '_to': '4', '_from': '5'}
-edge5 = {'_key': '5', '_to': '5', '_from': '1'}
-test_edges = [edge1, edge2, edge3, edge4, edge5]
-
-
-def teardown_module(*_):
- arango_client.delete_database(db_name, ignore_missing=True)
-
-
-def setup_function(*_):
+
+def test_document_insert(col, docs):
+ # Test insert document with no key
+ result = col.insert({})
+ assert result['_key'] in col
+ assert len(col) == 1
+ col.truncate()
+
+ # Test insert document with ID
+ doc_id = col.name + '/' + 'foo'
+ col.insert({'_id': doc_id})
+ assert 'foo' in col
+ assert doc_id in col
+ assert len(col) == 1
col.truncate()
+ with assert_raises(DocumentParseError) as err:
+ col.insert({'_id': generate_col_name() + '/' + 'foo'})
+ assert 'bad collection name' in err.value.message
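+ # An '_id' prefix must match the collection's own name; a foreign
+ # prefix is rejected as unparsable with DocumentParseError.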
-def test_insert():
# Test insert with default options
- for doc in test_docs:
+ for doc in docs:
result = col.insert(doc)
assert result['_id'] == '{}/{}'.format(col.name, doc['_key'])
assert result['_key'] == doc['_key']
assert isinstance(result['_rev'], string_types)
assert col[doc['_key']]['val'] == doc['val']
- assert len(col) == 5
+ assert len(col) == len(docs)
col.truncate()
- # Test insert with sync
- doc = doc1
+ # Test insert with sync set to True
+ doc = docs[0]
result = col.insert(doc, sync=True)
assert result['_id'] == '{}/{}'.format(col.name, doc['_key'])
assert result['_key'] == doc['_key']
assert isinstance(result['_rev'], string_types)
- assert result['sync'] is True
assert col[doc['_key']]['_key'] == doc['_key']
assert col[doc['_key']]['val'] == doc['val']
- # Test insert without sync
- doc = doc2
+ # Test insert with sync set to False
+ doc = docs[1]
result = col.insert(doc, sync=False)
assert result['_id'] == '{}/{}'.format(col.name, doc['_key'])
assert result['_key'] == doc['_key']
assert isinstance(result['_rev'], string_types)
- assert result['sync'] is False
assert col[doc['_key']]['_key'] == doc['_key']
assert col[doc['_key']]['val'] == doc['val']
- # Test insert with return_new
- doc = doc3
+ # Test insert with return_new set to True
+ doc = docs[2]
result = col.insert(doc, return_new=True)
assert result['_id'] == '{}/{}'.format(col.name, doc['_key'])
assert result['_key'] == doc['_key']
@@ -93,8 +84,8 @@ def test_insert():
assert col[doc['_key']]['_key'] == doc['_key']
assert col[doc['_key']]['val'] == doc['val']
- # Test insert without return_new
- doc = doc4
+ # Test insert with return_new set to False
+ doc = docs[3]
result = col.insert(doc, return_new=False)
assert result['_id'] == '{}/{}'.format(col.name, doc['_key'])
assert result['_key'] == doc['_key']
@@ -103,47 +94,62 @@ def test_insert():
assert col[doc['_key']]['_key'] == doc['_key']
assert col[doc['_key']]['val'] == doc['val']
+ # Test insert with silent set to True
+ doc = docs[4]
+ assert col.insert(doc, silent=True) is True
+ assert col[doc['_key']]['_key'] == doc['_key']
+ assert col[doc['_key']]['val'] == doc['val']
+
# Test insert duplicate document
- with pytest.raises(DocumentInsertError):
- col.insert(doc4)
+ with assert_raises(DocumentInsertError) as err:
+ col.insert(doc)
+ assert err.value.error_code == 1210
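+ # Error 1210 ("unique constraint violated"): the _key already exists.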
-def test_insert_many():
+def test_document_insert_many(col, bad_col, docs):
# Test insert_many with default options
- results = col.insert_many(test_docs)
- for result, doc in zip(results, test_docs):
+ results = col.insert_many(docs)
+ for result, doc in zip(results, docs):
assert result['_id'] == '{}/{}'.format(col.name, doc['_key'])
assert result['_key'] == doc['_key']
assert isinstance(result['_rev'], string_types)
assert col[doc['_key']]['val'] == doc['val']
- assert len(col) == 5
+ assert len(col) == len(docs)
col.truncate()
- # Test insert_many with sync
- results = col.insert_many(test_docs, sync=True)
- for result, doc in zip(results, test_docs):
+ # Test insert_many with document IDs
+ docs_with_id = [{'_id': col.name + '/' + doc['_key']} for doc in docs]
+ results = col.insert_many(docs_with_id)
+ for result, doc in zip(results, docs):
+ assert result['_id'] == '{}/{}'.format(col.name, doc['_key'])
+ assert result['_key'] == doc['_key']
+ assert isinstance(result['_rev'], string_types)
+ assert len(col) == len(docs)
+ col.truncate()
+
+ # Test insert_many with sync set to True
+ results = col.insert_many(docs, sync=True)
+ for result, doc in zip(results, docs):
assert result['_id'] == '{}/{}'.format(col.name, doc['_key'])
assert result['_key'] == doc['_key']
assert isinstance(result['_rev'], string_types)
- assert result['sync'] is True
assert col[doc['_key']]['_key'] == doc['_key']
assert col[doc['_key']]['val'] == doc['val']
col.truncate()
- # Test insert_many without sync
- results = col.insert_many(test_docs, sync=False)
- for result, doc in zip(results, test_docs):
+ # Test insert_many with sync set to False
+ results = col.insert_many(docs, sync=False)
+ for result, doc in zip(results, docs):
assert result['_id'] == '{}/{}'.format(col.name, doc['_key'])
assert result['_key'] == doc['_key']
assert isinstance(result['_rev'], string_types)
- assert result['sync'] is False
assert col[doc['_key']]['_key'] == doc['_key']
assert col[doc['_key']]['val'] == doc['val']
col.truncate()
- # Test insert_many with return_new
- results = col.insert_many(test_docs, return_new=True)
- for result, doc in zip(results, test_docs):
+ # Test insert_many with return_new set to True
+ results = col.insert_many(docs, return_new=True)
+ for result, doc in zip(results, docs):
assert result['_id'] == '{}/{}'.format(col.name, doc['_key'])
assert result['_key'] == doc['_key']
assert isinstance(result['_rev'], string_types)
@@ -155,811 +161,870 @@ def test_insert_many():
assert col[doc['_key']]['val'] == doc['val']
col.truncate()
- # Test insert_many without return_new
- results = col.insert_many(test_docs, return_new=False)
- for result, doc in zip(results, test_docs):
+ # Test insert_many with return_new set to False
+ results = col.insert_many(docs, return_new=False)
+ for result, doc in zip(results, docs):
assert result['_id'] == '{}/{}'.format(col.name, doc['_key'])
assert result['_key'] == doc['_key']
assert isinstance(result['_rev'], string_types)
assert 'new' not in result
assert col[doc['_key']]['_key'] == doc['_key']
assert col[doc['_key']]['val'] == doc['val']
+ col.truncate()
+
+ # Test insert_many with silent set to True
+ assert col.insert_many(docs, silent=True) is True
+ for doc in docs:
+ assert col[doc['_key']]['_key'] == doc['_key']
+ assert col[doc['_key']]['val'] == doc['val']
# Test insert_many duplicate documents
- results = col.insert_many(test_docs, return_new=False)
- for result, doc in zip(results, test_docs):
+ results = col.insert_many(docs, return_new=False)
+ for result, doc in zip(results, docs):
 assert isinstance(result, DocumentInsertError)
-
- # Test get with missing collection
- with pytest.raises(DocumentInsertError):
- bad_col.insert_many(test_docs)
+ # Test get with bad database
+ with assert_raises(DocumentInsertError) as err:
+ bad_col.insert_many(docs)
+ assert err.value.error_code == 1228
-def test_update():
- doc = doc1.copy()
+
+def test_document_update(col, docs):
+ doc = docs[0]
col.insert(doc)
# Test update with default options
doc['val'] = {'foo': 1}
doc = col.update(doc)
assert doc['_id'] == '{}/1'.format(col.name)
- assert doc['_key'] == '1'
+ assert doc['_key'] == '1'
assert isinstance(doc['_rev'], string_types)
- assert col['1']['val'] == {'foo': 1}
- current_rev = doc['_rev']
+ assert col[doc['_key']]['val'] == {'foo': 1}
+ old_rev = doc['_rev']
- # Test update with merge
+ # Test update with merge set to True
doc['val'] = {'bar': 2}
doc = col.update(doc, merge=True)
assert doc['_id'] == '{}/1'.format(col.name)
- assert doc['_key'] == '1'
+ assert doc['_key'] == '1'
assert isinstance(doc['_rev'], string_types)
- assert doc['_old_rev'] == current_rev
- assert col['1']['val'] == {'foo': 1, 'bar': 2}
- current_rev = doc['_rev']
+ assert doc['_old_rev'] == old_rev
+ assert col[doc['_key']]['val'] == {'foo': 1, 'bar': 2}
+ old_rev = doc['_rev']
- # Test update without merge
+ # Test update with merge set to False
doc['val'] = {'baz': 3}
doc = col.update(doc, merge=False)
assert doc['_id'] == '{}/1'.format(col.name)
- assert doc['_key'] == '1'
+ assert doc['_key'] == '1'
assert isinstance(doc['_rev'], string_types)
- assert doc['_old_rev'] == current_rev
- assert col['1']['val'] == {'baz': 3}
- current_rev = doc['_rev']
+ assert doc['_old_rev'] == old_rev
+ assert col[doc['_key']]['val'] == {'baz': 3}
+ old_rev = doc['_rev']
- # Test update with keep_none
+ # Test update with keep_none set to True
doc['val'] = None
doc = col.update(doc, keep_none=True)
assert doc['_id'] == '{}/1'.format(col.name)
- assert doc['_key'] == '1'
+ assert doc['_key'] == '1'
assert isinstance(doc['_rev'], string_types)
- assert doc['_old_rev'] == current_rev
- assert col['1']['val'] is None
- current_rev = doc['_rev']
+ assert doc['_old_rev'] == old_rev
+ assert col[doc['_key']]['val'] is None
+ old_rev = doc['_rev']
- # Test update without keep_none
+ # Test update with keep_none set to False
doc['val'] = None
doc = col.update(doc, keep_none=False)
assert doc['_id'] == '{}/1'.format(col.name)
- assert doc['_key'] == '1'
+ assert doc['_key'] == '1'
assert isinstance(doc['_rev'], string_types)
- assert doc['_old_rev'] == current_rev
- assert 'val' not in col['1']
- current_rev = doc['_rev']
+ assert doc['_old_rev'] == old_rev
+ assert 'val' not in col[doc['_key']]
+ old_rev = doc['_rev']
- # Test update with return_new and return_old
- doc['val'] = 300
+ # Test update with return_new and return_old set to True
+ doc['val'] = 3
doc = col.update(doc, return_new=True, return_old=True)
assert doc['_id'] == '{}/1'.format(col.name)
- assert doc['_key'] == '1'
+ assert doc['_key'] == '1'
assert isinstance(doc['_rev'], string_types)
- assert doc['_old_rev'] == current_rev
- assert doc['new']['_key'] == '1'
- assert doc['new']['val'] == 300
- assert doc['old']['_key'] == '1'
+ assert doc['_old_rev'] == old_rev
+ assert doc['new']['_key'] == doc['_key']
+ assert doc['new']['val'] == 3
+ assert doc['old']['_key'] == doc['_key']
assert 'val' not in doc['old']
- assert col['1']['val'] == 300
- current_rev = doc['_rev']
+ assert col[doc['_key']]['val'] == 3
+ old_rev = doc['_rev']
- # Test update without return_new and return_old
- doc['val'] = 400
+ # Test update with return_new and return_old set to False
+ doc['val'] = 4
doc = col.update(doc, return_new=False, return_old=False)
assert doc['_id'] == '{}/1'.format(col.name)
- assert doc['_key'] == '1'
+ assert doc['_key'] == '1'
assert isinstance(doc['_rev'], string_types)
- assert doc['_old_rev'] == current_rev
+ assert doc['_old_rev'] == old_rev
assert 'new' not in doc
assert 'old' not in doc
- assert col['1']['val'] == 400
- current_rev = doc['_rev']
+ assert col[doc['_key']]['val'] == 4
+ old_rev = doc['_rev']
- # Test update with check_rev
- doc['val'] = 500
- doc['_rev'] = current_rev + '000'
- with pytest.raises(DocumentRevisionError):
+ # Test update with check_rev set to True
+ doc['val'] = 5
+ doc['_rev'] = old_rev + '0'
+ with assert_raises(DocumentRevisionError) as err:
col.update(doc, check_rev=True)
- assert col['1']['val'] == 400
+ assert err.value.error_code == 1200
+ assert col[doc['_key']]['val'] == 4
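+ # Error 1200 ("conflict"): the padded _rev no longer matches the
+ # stored revision, so the update is refused and the value unchanged.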
- # Test update with sync
- doc['val'] = 600
- doc = col.update(doc, sync=True)
+ # Test update with check_rev set to False
+ doc = col.update(doc, check_rev=False)
assert doc['_id'] == '{}/1'.format(col.name)
- assert doc['_key'] == '1'
+ assert doc['_key'] == '1'
assert isinstance(doc['_rev'], string_types)
- assert doc['_old_rev'] == current_rev
- assert doc['sync'] is True
- assert col['1']['val'] == 600
- current_rev = doc['_rev']
-
- # Test update without sync
- doc['val'] = 700
- doc = col.update(doc, sync=False)
+ assert doc['_old_rev'] == old_rev
+ assert col[doc['_key']]['val'] == 5
+ old_rev = doc['_rev']
+
+ # Test update with sync set to True
+ doc['val'] = 6
+ doc = col.update(doc, sync=True, check_rev=False)
assert doc['_id'] == '{}/1'.format(col.name)
- assert doc['_key'] == '1'
+ assert doc['_key'] == '1'
assert isinstance(doc['_rev'], string_types)
- assert doc['_old_rev'] == current_rev
- assert doc['sync'] is False
- assert col['1']['val'] == 700
- current_rev = doc['_rev']
+ assert doc['_old_rev'] == old_rev
+ assert col[doc['_key']]['val'] == 6
+ old_rev = doc['_rev']
+
+ # Test update with sync set to False
+ doc['val'] = 7
+ doc = col.update(doc, sync=False, check_rev=False)
+ assert doc['_id'] == '{}/1'.format(col.name)
+ assert doc['_key'] == '1'
+ assert isinstance(doc['_rev'], string_types)
+ assert doc['_old_rev'] == old_rev
+ assert col[doc['_key']]['val'] == 7
+ old_rev = doc['_rev']
# Test update missing document
- with pytest.raises(DocumentUpdateError):
- col.update(doc2)
- assert '2' not in col
- assert col['1']['val'] == 700
- assert col['1']['_rev'] == current_rev
+ missing_doc = docs[1]
+ with assert_raises(DocumentUpdateError) as err:
+ col.update(missing_doc)
+ assert err.value.error_code == 1202
+ assert missing_doc['_key'] not in col
+ assert col[doc['_key']]['val'] == 7
+ assert col[doc['_key']]['_rev'] == old_rev
- # Test update in missing collection
- with pytest.raises(DocumentUpdateError):
- bad_col.update(doc)
+ # Test update with silent set to True
+ doc['val'] = 8
+ assert col.update(doc, silent=True) is True
+ assert col[doc['_key']]['val'] == 8
-def test_update_many():
- current_revs = {}
- docs = [doc.copy() for doc in test_docs]
- doc_keys = [doc['_key'] for doc in docs]
+def test_document_update_many(col, bad_col, docs):
col.insert_many(docs)
+ old_revs = {}
+ doc_keys = [d['_key'] for d in docs]
+
# Test update_many with default options
for doc in docs:
- doc['val'] = {'foo': 1}
+ doc['val'] = {'foo': 0}
results = col.update_many(docs)
- for result, key in zip(results, doc_keys):
- assert result['_id'] == '{}/{}'.format(col.name, key)
- assert result['_key'] == key
+ for result, doc_key in zip(results, doc_keys):
+ assert result['_id'] == '{}/{}'.format(col.name, doc_key)
+ assert result['_key'] == doc_key
+ assert isinstance(result['_rev'], string_types)
+ assert col[doc_key]['val'] == {'foo': 0}
+ old_revs[doc_key] = result['_rev']
+
+ # Test update_many with IDs
+ docs_with_ids = [
+ {'_id': col.name + '/' + d['_key'], 'val': {'foo': 1}}
+ for d in docs
+ ]
+ results = col.update_many(docs_with_ids)
+ for result, doc_key in zip(results, doc_keys):
+ assert result['_id'] == '{}/{}'.format(col.name, doc_key)
+ assert result['_key'] == doc_key
assert isinstance(result['_rev'], string_types)
- assert col[key]['val'] == {'foo': 1}
- current_revs[key] = result['_rev']
+ assert col[doc_key]['val'] == {'foo': 1}
+ old_revs[doc_key] = result['_rev']
- # Test update_many with merge
+ # Test update_many with merge set to True
for doc in docs:
doc['val'] = {'bar': 2}
results = col.update_many(docs, merge=True)
for result, doc in zip(results, docs):
- key = doc['_key']
- assert result['_id'] == '{}/{}'.format(col.name, key)
- assert result['_key'] == key
+ doc_key = doc['_key']
+ assert result['_id'] == '{}/{}'.format(col.name, doc_key)
+ assert result['_key'] == doc_key
assert isinstance(result['_rev'], string_types)
- assert result['_old_rev'] == current_revs[key]
- assert col[key]['val'] == {'foo': 1, 'bar': 2}
- current_revs[key] = result['_rev']
+ assert result['_old_rev'] == old_revs[doc_key]
+ assert col[doc_key]['val'] == {'foo': 1, 'bar': 2}
+ old_revs[doc_key] = result['_rev']
- # Test update_many without merge
+ # Test update_many with merge set to False
for doc in docs:
doc['val'] = {'baz': 3}
results = col.update_many(docs, merge=False)
for result, doc in zip(results, docs):
- key = doc['_key']
- assert result['_id'] == '{}/{}'.format(col.name, key)
- assert result['_key'] == key
+ doc_key = doc['_key']
+ assert result['_id'] == '{}/{}'.format(col.name, doc_key)
+ assert result['_key'] == doc_key
assert isinstance(result['_rev'], string_types)
- assert result['_old_rev'] == current_revs[key]
- assert col[key]['val'] == {'baz': 3}
- current_revs[key] = result['_rev']
+ assert result['_old_rev'] == old_revs[doc_key]
+ assert col[doc_key]['val'] == {'baz': 3}
+ old_revs[doc_key] = result['_rev']
- # Test update_many with keep_none
+ # Test update_many with keep_none set to True
for doc in docs:
doc['val'] = None
results = col.update_many(docs, keep_none=True)
for result, doc in zip(results, docs):
- key = doc['_key']
- assert result['_id'] == '{}/{}'.format(col.name, key)
- assert result['_key'] == key
+ doc_key = doc['_key']
+ assert result['_id'] == '{}/{}'.format(col.name, doc_key)
+ assert result['_key'] == doc_key
assert isinstance(result['_rev'], string_types)
- assert result['_old_rev'] == current_revs[key]
- assert col[key]['val'] is None
- current_revs[key] = result['_rev']
+ assert result['_old_rev'] == old_revs[doc_key]
+ assert col[doc_key]['val'] is None
+ old_revs[doc_key] = result['_rev']
- # Test update_many without keep_none
+ # Test update_many with keep_none set to False
for doc in docs:
doc['val'] = None
results = col.update_many(docs, keep_none=False)
for result, doc in zip(results, docs):
- key = doc['_key']
- assert result['_id'] == '{}/{}'.format(col.name, key)
- assert result['_key'] == key
+ doc_key = doc['_key']
+ assert result['_id'] == '{}/{}'.format(col.name, doc_key)
+ assert result['_key'] == doc_key
assert isinstance(result['_rev'], string_types)
- assert result['_old_rev'] == current_revs[key]
- assert 'val' not in col[key]
- current_revs[key] = result['_rev']
+ assert result['_old_rev'] == old_revs[doc_key]
+ assert 'val' not in col[doc_key]
+ old_revs[doc_key] = result['_rev']
- # Test update_many with return_new and return_old
+ # Test update_many with return_new and return_old set to True
for doc in docs:
- doc['val'] = 300
+ doc['val'] = 3
results = col.update_many(docs, return_new=True, return_old=True)
for result, doc in zip(results, docs):
- key = doc['_key']
- assert result['_id'] == '{}/{}'.format(col.name, key)
- assert result['_key'] == key
+ doc_key = doc['_key']
+ assert result['_id'] == '{}/{}'.format(col.name, doc_key)
+ assert result['_key'] == doc_key
assert isinstance(result['_rev'], string_types)
- assert result['_old_rev'] == current_revs[key]
- assert result['new']['_key'] == key
- assert result['new']['val'] == 300
- assert result['old']['_key'] == key
+ assert result['_old_rev'] == old_revs[doc_key]
+ assert result['new']['_key'] == doc_key
+ assert result['new']['val'] == 3
+ assert result['old']['_key'] == doc_key
assert 'val' not in result['old']
- assert col[key]['val'] == 300
- current_revs[key] = result['_rev']
+ assert col[doc_key]['val'] == 3
+ old_revs[doc_key] = result['_rev']
- # Test update without return_new and return_old
+ # Test update_many with return_new and return_old set to False
for doc in docs:
- doc['val'] = 400
+ doc['val'] = 4
results = col.update_many(docs, return_new=False, return_old=False)
for result, doc in zip(results, docs):
- key = doc['_key']
- assert result['_id'] == '{}/{}'.format(col.name, key)
- assert result['_key'] == key
+ doc_key = doc['_key']
+ assert result['_id'] == '{}/{}'.format(col.name, doc_key)
+ assert result['_key'] == doc_key
assert isinstance(result['_rev'], string_types)
- assert result['_old_rev'] == current_revs[key]
+ assert result['_old_rev'] == old_revs[doc_key]
assert 'new' not in result
assert 'old' not in result
- assert col[key]['val'] == 400
- current_revs[key] = result['_rev']
+ assert col[doc_key]['val'] == 4
+ old_revs[doc_key] = result['_rev']
- # Test update_many with check_rev
+ # Test update_many with check_rev set to True
for doc in docs:
- doc['val'] = 500
- doc['_rev'] = current_revs[doc['_key']] + '000'
+ doc['val'] = 5
+ doc['_rev'] = old_revs[doc['_key']] + '0'
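+ # Appending '0' forges a stale revision, so the revision check below must fail.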
results = col.update_many(docs, check_rev=True)
- for result, key in zip(results, doc_keys):
+ for result, doc_key in zip(results, doc_keys):
assert isinstance(result, DocumentRevisionError)
for doc in col:
- assert doc['val'] == 400
+ assert doc['val'] == 4
+
+ # Test update_many with check_rev set to False
+ results = col.update_many(docs, check_rev=False)
+ for result, doc in zip(results, docs):
+ doc_key = doc['_key']
+ assert result['_id'] == '{}/{}'.format(col.name, doc_key)
+ assert result['_key'] == doc_key
+ assert isinstance(result['_rev'], string_types)
+ assert result['_old_rev'] == old_revs[doc_key]
+ assert col[doc_key]['val'] == 5
+ old_revs[doc_key] = result['_rev']
- # Test update_many with sync
+ # Test update_many with sync set to True
for doc in docs:
- doc['val'] = 600
- results = col.update_many(docs, sync=True)
+ doc['val'] = 6
+ results = col.update_many(docs, sync=True, check_rev=False)
for result, doc in zip(results, docs):
- key = doc['_key']
- assert result['_id'] == '{}/{}'.format(col.name, key)
- assert result['_key'] == key
+ doc_key = doc['_key']
+ assert result['_id'] == '{}/{}'.format(col.name, doc_key)
+ assert result['_key'] == doc_key
assert isinstance(result['_rev'], string_types)
- assert result['_old_rev'] == current_revs[key]
- assert result['sync'] is True
- assert col[key]['val'] == 600
- current_revs[key] = result['_rev']
+ assert result['_old_rev'] == old_revs[doc_key]
+ assert col[doc_key]['val'] == 6
+ old_revs[doc_key] = result['_rev']
- # Test update_many without sync
+ # Test update_many with sync set to False
for doc in docs:
- doc['val'] = 700
- results = col.update_many(docs, sync=False)
+ doc['val'] = 7
+ results = col.update_many(docs, sync=False, check_rev=False)
for result, doc in zip(results, docs):
- key = doc['_key']
- assert result['_id'] == '{}/{}'.format(col.name, key)
- assert result['_key'] == key
+ doc_key = doc['_key']
+ assert result['_id'] == '{}/{}'.format(col.name, doc_key)
+ assert result['_key'] == doc_key
assert isinstance(result['_rev'], string_types)
- assert result['_old_rev'] == current_revs[key]
- assert result['sync'] is False
- assert col[key]['val'] == 700
- current_revs[key] = result['_rev']
-
- # Test update_many with missing documents
- results = col.update_many([{'_key': '6'}, {'_key': '7'}])
- for result, key in zip(results, doc_keys):
- assert isinstance(result, DocumentUpdateError)
- assert '6' not in col
- assert '7' not in col
- for doc in col:
- assert doc['val'] == 700
+ assert result['_old_rev'] == old_revs[doc_key]
+ assert col[doc_key]['val'] == 7
+ old_revs[doc_key] = result['_rev']
- # Test update_many in missing collection
- with pytest.raises(DocumentUpdateError):
+ # Test update_many with bad database
+ with assert_raises(DocumentUpdateError) as err:
bad_col.update_many(docs)
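+ # 1228 is ArangoDB's "database not found" error code.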
+ assert err.value.error_code == 1228
+
+ # Test update_many with silent set to True
+ for doc in docs:
+ doc['val'] = 8
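+ # In silent mode no per-document metadata is returned, only True on success.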
+ assert col.update_many(docs, silent=True, check_rev=False) is True
+ for doc in docs:
+ assert col[doc['_key']]['val'] == 8
+ # Test update_many with bad documents
+ with assert_raises(DocumentParseError) as err:
+ bad_col.update_many([{}])
+ assert str(err.value) == 'field "_key" or "_id" required'
-def test_update_match():
- # Test preconditions
- assert col.update_match({'val': 100}, {'foo': 100}) == 0
+def test_document_update_match(col, bad_col, docs):
# Set up test documents
- col.import_bulk(test_docs)
+ col.import_bulk(docs)
# Test update single matching document
- assert col.update_match({'val': 200}, {'foo': 100}) == 1
- assert col['4']['val'] == 200
- assert col['4']['foo'] == 100
+ assert col.update_match({'val': 2}, {'val': 1}) == 1
+ assert col['2']['val'] == 1
# Test update multiple matching documents
- assert col.update_match({'val': 100}, {'foo': 100}) == 3
- for key in ['1', '2', '3']:
- assert col[key]['val'] == 100
- assert col[key]['foo'] == 100
-
- # Test update multiple matching documents with limit
- assert col.update_match(
- {'val': 100},
- {'foo': 200},
- limit=2
- ) == 2
- assert [doc.get('foo') for doc in col].count(200) == 2
+ assert col.update_match({'val': 1}, {'foo': 1}) == 2
+ for doc_key in ['1', '2']:
+ assert col[doc_key]['val'] == 1
+ assert col[doc_key]['foo'] == 1
- # Test unaffected document
- assert col['5']['val'] == 300
- assert 'foo' not in col['5']
+ # Test update multiple matching documents with limit set to 1
+ assert col.update_match({'val': 1}, {'foo': 2}, limit=1) == 1
+ assert [doc.get('foo') for doc in col].count(2) == 1
- # Test update matching documents with sync and keep_none
+ # Test update matching documents with sync and keep_none set to True
assert col.update_match(
- {'val': 300},
+ {'val': 3},
{'val': None},
sync=True,
keep_none=True
) == 1
- assert col['5']['val'] is None
+ assert col['3']['val'] is None
- # Test update matching documents without sync and keep_none
+ # Test update matching documents with sync and keep_none set to False
assert col.update_match(
- {'val': 200},
+ {'val': 1},
{'val': None},
sync=False,
keep_none=False
- ) == 1
- assert 'val' not in col['4']
+ ) == 2
+ assert 'val' not in col['1']
+ assert 'val' not in col['2']
- # Test update matching documents in missing collection
- with pytest.raises(DocumentUpdateError):
- bad_col.update_match({'val': 100}, {'foo': 100})
+ # Test update matching documents with bad database
+ with assert_raises(DocumentUpdateError) as err:
+ bad_col.update_match({'val': 1}, {'foo': 1})
+ assert err.value.error_code == 1228
-def test_replace():
- doc = doc1.copy()
+def test_document_replace(col, docs):
+ doc = docs[0]
col.insert(doc)
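+ # Unlike update, replace swaps the entire body: fields absent from the new document are dropped.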
# Test replace with default options
- doc['foo'] = 200
+ doc['foo'] = 2
doc.pop('val')
doc = col.replace(doc)
assert doc['_id'] == '{}/1'.format(col.name)
- assert doc['_key'] == '1'
+ assert doc['_key'] == docs[0]['_key']
assert isinstance(doc['_rev'], string_types)
- assert col['1']['foo'] == 200
- assert 'val' not in col['1']
- current_rev = doc['_rev']
+ assert col[doc['_key']]['foo'] == 2
+ assert 'val' not in col[doc['_key']]
+ old_rev = doc['_rev']
- # Test update with return_new and return_old
- doc['bar'] = 300
+ # Test replace with return_new and return_old set to True
+ doc['bar'] = 3
doc = col.replace(doc, return_new=True, return_old=True)
assert doc['_id'] == '{}/1'.format(col.name)
- assert doc['_key'] == '1'
+ assert doc['_key'] == docs[0]['_key']
assert isinstance(doc['_rev'], string_types)
- assert doc['_old_rev'] == current_rev
- assert doc['new']['_key'] == '1'
- assert doc['new']['bar'] == 300
+ assert doc['_old_rev'] == old_rev
+ assert doc['new']['_key'] == doc['_key']
+ assert doc['new']['bar'] == 3
assert 'foo' not in doc['new']
- assert doc['old']['_key'] == '1'
- assert doc['old']['foo'] == 200
+ assert doc['old']['_key'] == doc['_key']
+ assert doc['old']['foo'] == 2
assert 'bar' not in doc['old']
- assert col['1']['bar'] == 300
- assert 'foo' not in col['1']
- current_rev = doc['_rev']
+ assert col[doc['_key']]['bar'] == 3
+ assert 'foo' not in col[doc['_key']]
+ old_rev = doc['_rev']
- # Test update without return_new and return_old
- doc['baz'] = 400
+ # Test replace with return_new and return_old set to False
+ doc['baz'] = 4
doc = col.replace(doc, return_new=False, return_old=False)
assert doc['_id'] == '{}/1'.format(col.name)
- assert doc['_key'] == '1'
+ assert doc['_key'] == docs[0]['_key']
assert isinstance(doc['_rev'], string_types)
- assert doc['_old_rev'] == current_rev
+ assert doc['_old_rev'] == old_rev
assert 'new' not in doc
assert 'old' not in doc
- assert col['1']['baz'] == 400
- assert 'bar' not in col['1']
- current_rev = doc['_rev']
-
- # Test replace with check_rev
- doc['foo'] = 500
- doc['_rev'] = current_rev + '000'
- with pytest.raises(DocumentRevisionError):
+ assert col[doc['_key']]['baz'] == 4
+ assert 'bar' not in col[doc['_key']]
+ old_rev = doc['_rev']
+
+ # Test replace with check_rev set to True
+ doc['foo'] = 5
+ doc['_rev'] = old_rev + '0'
+ with assert_raises(DocumentRevisionError):
col.replace(doc, check_rev=True)
- assert 'foo' not in col['1']
- assert col['1']['baz'] == 400
+ assert 'foo' not in col[doc['_key']]
+ assert col[doc['_key']]['baz'] == 4
- # Test replace with sync
- doc['foo'] = 500
- doc = col.replace(doc, sync=True)
+ # Test replace with check_rev set to False
+ doc = col.replace(doc, check_rev=False)
assert doc['_id'] == '{}/1'.format(col.name)
- assert doc['_key'] == '1'
+ assert doc['_key'] == docs[0]['_key']
assert isinstance(doc['_rev'], string_types)
- assert doc['_old_rev'] == current_rev
- assert doc['sync'] is True
- assert col['1']['foo'] == 500
- assert 'baz' not in col['1']
- current_rev = doc['_rev']
-
- # Test replace without sync
- doc['bar'] = 600
- doc = col.replace(doc, sync=False)
+ assert doc['_old_rev'] == old_rev
+ assert col[doc['_key']]['foo'] == 5
+ old_rev = doc['_rev']
+
+ # Test replace with sync set to True
+ doc['foo'] = 6
+ doc = col.replace(doc, sync=True, check_rev=False)
assert doc['_id'] == '{}/1'.format(col.name)
- assert doc['_key'] == '1'
+ assert doc['_key'] == docs[0]['_key']
assert isinstance(doc['_rev'], string_types)
- assert doc['_old_rev'] == current_rev
- assert doc['sync'] is False
- assert col['1']['bar'] == 600
- assert 'foo' not in col['1']
- current_rev = doc['_rev']
+ assert doc['_old_rev'] == old_rev
+ assert col[doc['_key']]['foo'] == 6
+ assert 'baz' not in col[doc['_key']]
+ old_rev = doc['_rev']
+
+ # Test replace with sync set to False
+ doc['bar'] = 7
+ doc = col.replace(doc, sync=False, check_rev=False)
+ assert doc['_id'] == '{}/1'.format(col.name)
+ assert doc['_key'] == docs[0]['_key']
+ assert isinstance(doc['_rev'], string_types)
+ assert doc['_old_rev'] == old_rev
+ assert col[doc['_key']]['bar'] == 7
+ assert 'foo' not in col[doc['_key']]
+ old_rev = doc['_rev']
# Test replace missing document
- with pytest.raises(DocumentReplaceError):
- col.replace(doc2)
- assert col['1']['bar'] == 600
- assert col['1']['_rev'] == current_rev
+ with assert_raises(DocumentReplaceError):
+ col.replace(docs[1])
+ assert col[doc['_key']]['bar'] == 7
+ assert col[doc['_key']]['_rev'] == old_rev
- # Test replace in missing collection
- with pytest.raises(DocumentReplaceError):
- bad_col.replace(doc)
+ # Test replace with silent set to True
+ doc['val'] = 8
+ assert col.replace(doc, silent=True) is True
+ assert col[doc['_key']]['val'] == 8
-def test_replace_many():
- current_revs = {}
- docs = [doc.copy() for doc in test_docs]
+def test_document_replace_many(col, bad_col, docs):
col.insert_many(docs)
+ old_revs = {}
+ doc_keys = [d['_key'] for d in docs]
+
# Test replace_many with default options
for doc in docs:
- doc['foo'] = 200
+ doc['foo'] = 1
doc.pop('val')
results = col.replace_many(docs)
- for result, key in zip(results, test_doc_keys):
- assert result['_id'] == '{}/{}'.format(col.name, key)
- assert result['_key'] == key
+ for result, doc_key in zip(results, doc_keys):
+ assert result['_id'] == '{}/{}'.format(col.name, doc_key)
+ assert result['_key'] == doc_key
+ assert isinstance(result['_rev'], string_types)
+ assert col[doc_key]['foo'] == 1
+ assert 'val' not in col[doc_key]
+ old_revs[doc_key] = result['_rev']
+
+ # Test replace_many with IDs
+ docs_with_ids = [
+ {'_id': col.name + '/' + d['_key'], 'foo': 2}
+ for d in docs
+ ]
+ results = col.replace_many(docs_with_ids)
+ for result, doc_key in zip(results, doc_keys):
+ assert result['_id'] == '{}/{}'.format(col.name, doc_key)
+ assert result['_key'] == doc_key
assert isinstance(result['_rev'], string_types)
- assert col[key]['foo'] == 200
- assert 'val' not in col[key]
- current_revs[key] = result['_rev']
+ assert col[doc_key]['foo'] == 2
+ old_revs[doc_key] = result['_rev']
- # Test update with return_new and return_old
+ # Test replace_many with return_new and return_old set to True
for doc in docs:
- doc['bar'] = 300
+ doc['bar'] = 3
doc.pop('foo')
results = col.replace_many(docs, return_new=True, return_old=True)
for result, doc in zip(results, docs):
- key = doc['_key']
- assert result['_id'] == '{}/{}'.format(col.name, key)
- assert result['_key'] == key
+ doc_key = doc['_key']
+ assert result['_id'] == '{}/{}'.format(col.name, doc_key)
+ assert result['_key'] == doc_key
assert isinstance(result['_rev'], string_types)
- assert result['_old_rev'] == current_revs[key]
- assert result['new']['_key'] == key
- assert result['new']['bar'] == 300
+ assert result['_old_rev'] == old_revs[doc_key]
+ assert result['new']['_key'] == doc_key
+ assert result['new']['bar'] == 3
assert 'foo' not in result['new']
- assert result['old']['_key'] == key
- assert result['old']['foo'] == 200
+ assert result['old']['_key'] == doc_key
+ assert result['old']['foo'] == 2
assert 'bar' not in result['old']
- assert col[key]['bar'] == 300
- current_revs[key] = result['_rev']
+ assert col[doc_key]['bar'] == 3
+ old_revs[doc_key] = result['_rev']
- # Test update without return_new and return_old
+ # Test replace_many with return_new and return_old set to False
for doc in docs:
- doc['baz'] = 400
+ doc['baz'] = 4
doc.pop('bar')
results = col.replace_many(docs, return_new=False, return_old=False)
for result, doc in zip(results, docs):
- key = doc['_key']
- assert result['_id'] == '{}/{}'.format(col.name, key)
- assert result['_key'] == key
+ doc_key = doc['_key']
+ assert result['_id'] == '{}/{}'.format(col.name, doc_key)
+ assert result['_key'] == doc_key
assert isinstance(result['_rev'], string_types)
- assert result['_old_rev'] == current_revs[key]
+ assert result['_old_rev'] == old_revs[doc_key]
assert 'new' not in result
assert 'old' not in result
- assert col[key]['baz'] == 400
- assert 'bar' not in col[key]
- current_revs[key] = result['_rev']
+ assert col[doc_key]['baz'] == 4
+ assert 'bar' not in col[doc_key]
+ old_revs[doc_key] = result['_rev']
- # Test replace_many with check_rev
+ # Test replace_many with check_rev set to True
for doc in docs:
- doc['foo'] = 500
+ doc['foo'] = 5
doc.pop('baz')
- doc['_rev'] = current_revs[doc['_key']] + '000'
+ doc['_rev'] = old_revs[doc['_key']] + '0'
results = col.replace_many(docs, check_rev=True)
- for result, key in zip(results, test_doc_keys):
+ for result, doc_key in zip(results, doc_keys):
assert isinstance(result, DocumentRevisionError)
for doc in col:
assert 'foo' not in doc
- assert doc['baz'] == 400
+ assert doc['baz'] == 4
- # Test replace_many with sync
+ # Test replace_many with check_rev set to False
+ results = col.replace_many(docs, check_rev=False)
+ for result, doc in zip(results, docs):
+ doc_key = doc['_key']
+ assert result['_id'] == '{}/{}'.format(col.name, doc_key)
+ assert result['_key'] == doc_key
+ assert isinstance(result['_rev'], string_types)
+ assert result['_old_rev'] == old_revs[doc_key]
+ assert col[doc_key]['foo'] == 5
+ assert 'baz' not in col[doc_key]
+ old_revs[doc_key] = result['_rev']
+
+ # Test replace_many with sync set to True
for doc in docs:
- doc['foo'] = 500
- results = col.replace_many(docs, sync=True)
+ doc['foo'] = 6
+ results = col.replace_many(docs, sync=True, check_rev=False)
for result, doc in zip(results, docs):
- key = doc['_key']
- assert result['_id'] == '{}/{}'.format(col.name, key)
- assert result['_key'] == key
+ doc_key = doc['_key']
+ assert result['_id'] == '{}/{}'.format(col.name, doc_key)
+ assert result['_key'] == doc_key
assert isinstance(result['_rev'], string_types)
- assert result['_old_rev'] == current_revs[key]
- assert result['sync'] is True
- assert col[key]['foo'] == 500
- assert 'baz' not in col[key]
- current_revs[key] = result['_rev']
+ assert result['_old_rev'] == old_revs[doc_key]
+ assert col[doc_key]['foo'] == 6
+ old_revs[doc_key] = result['_rev']
- # Test replace_many without sync
+ # Test replace_many with sync set to False
for doc in docs:
- doc['bar'] = 600
+ doc['bar'] = 7
doc.pop('foo')
- results = col.replace_many(docs, sync=False)
+ results = col.replace_many(docs, sync=False, check_rev=False)
for result, doc in zip(results, docs):
- key = doc['_key']
- assert result['_id'] == '{}/{}'.format(col.name, key)
- assert result['_key'] == key
+ doc_key = doc['_key']
+ assert result['_id'] == '{}/{}'.format(col.name, doc_key)
+ assert result['_key'] == doc_key
assert isinstance(result['_rev'], string_types)
- assert result['_old_rev'] == current_revs[key]
- assert result['sync'] is False
- assert col[key]['bar'] == 600
- assert 'foo' not in col[key]
- current_revs[key] = result['_rev']
-
- # Test replace_many with missing documents
- results = col.replace_many([{'_key': '6'}, {'_key': '7'}])
- for result, key in zip(results, test_doc_keys):
- assert isinstance(result, DocumentReplaceError)
- assert '6' not in col
- assert '7' not in col
- for doc in col:
- assert doc['bar'] == 600
- assert doc['_rev'] == current_revs[doc['_key']]
+ assert result['_old_rev'] == old_revs[doc_key]
+ assert col[doc_key]['bar'] == 7
+ assert 'foo' not in col[doc_key]
+ old_revs[doc_key] = result['_rev']
- # Test replace_many in missing collection
- with pytest.raises(DocumentReplaceError):
+ # Test replace_many with bad database
+ with assert_raises(DocumentReplaceError) as err:
bad_col.replace_many(docs)
+ assert err.value.error_code == 1228
+
+ # Test replace_many with silent set to True
+ for doc in docs:
+ doc['foo'] = 8
+ doc.pop('bar')
+ assert col.replace_many(docs, silent=True, check_rev=False) is True
+ for doc in docs:
+ doc_key = doc['_key']
+ assert col[doc_key]['foo'] == 8
+ assert 'bar' not in col[doc_key]
+ # Test replace_many with bad documents
+ with assert_raises(DocumentParseError) as err:
+ bad_col.replace_many([{}])
+ assert str(err.value) == 'field "_key" or "_id" required'
-def test_replace_match():
- # Test preconditions
- assert col.replace_match({'val': 100}, {'foo': 100}) == 0
- # Set up test documents
- col.import_bulk(test_docs)
+def test_document_replace_match(col, bad_col, docs):
+ col.import_bulk(docs)
# Test replace single matching document
- assert col.replace_match({'val': 200}, {'foo': 100}) == 1
- assert 'val' not in col['4']
- assert col['4']['foo'] == 100
+ assert col.replace_match({'val': 2}, {'val': 1, 'foo': 1}) == 1
+ assert col['2']['val'] == 1
+ assert col['2']['foo'] == 1
# Test replace multiple matching documents
- assert col.replace_match({'val': 100}, {'foo': 100}) == 3
- for key in ['1', '2', '3']:
- assert 'val' not in col[key]
- assert col[key]['foo'] == 100
-
- # Test replace multiple matching documents with limit
- assert col.replace_match(
- {'foo': 100},
- {'bar': 200},
- limit=2
- ) == 2
- assert [doc.get('bar') for doc in col].count(200) == 2
+ assert col.replace_match({'val': 1}, {'foo': 1}) == 2
+ for doc_key in ['1', '2']:
+ assert 'val' not in col[doc_key]
+ assert col[doc_key]['foo'] == 1
- # Test unaffected document
- assert col['5']['val'] == 300
- assert 'foo' not in col['5']
+ # Test replace multiple matching documents with limit and sync
+ assert col.replace_match({'foo': 1}, {'bar': 2}, limit=1, sync=True) == 1
+ assert [doc.get('bar') for doc in col].count(2) == 1
- # Test replace matching documents in missing collection
- with pytest.raises(DocumentReplaceError):
- bad_col.replace_match({'val': 100}, {'foo': 100})
+ # Test replace matching documents with bad database
+ with assert_raises(DocumentReplaceError) as err:
+ bad_col.replace_match({'val': 1}, {'foo': 1})
+ assert err.value.error_code == 1228
-def test_delete():
+def test_document_delete(col, docs):
# Set up test documents
- col.import_bulk(test_docs)
+ col.import_bulk(docs)
# Test delete (document) with default options
- result = col.delete(doc1)
- assert result['_id'] == '{}/{}'.format(col.name, doc1['_key'])
- assert result['_key'] == doc1['_key']
- assert isinstance(result['_rev'], string_types)
- assert result['sync'] is False
- assert 'old' not in result
- assert doc1['_key'] not in col
- assert len(col) == 4
-
- # Test delete (document key) with default options
- result = col.delete(doc2['_key'])
- assert result['_id'] == '{}/{}'.format(col.name, doc2['_key'])
- assert result['_key'] == doc2['_key']
+ doc = docs[0]
+ result = col.delete(doc)
+ assert result['_id'] == '{}/{}'.format(col.name, doc['_key'])
+ assert result['_key'] == doc['_key']
assert isinstance(result['_rev'], string_types)
- assert result['sync'] is False
assert 'old' not in result
- assert doc2['_key'] not in col
- assert len(col) == 3
+ assert doc['_key'] not in col
+ assert len(col) == 5
- # Test delete (document) with return_old
- result = col.delete(doc3, return_old=True)
- assert result['_id'] == '{}/{}'.format(col.name, doc3['_key'])
- assert result['_key'] == doc3['_key']
+ # Test delete (document ID) with return_old set to True
+ doc = docs[1]
+ doc_id = '{}/{}'.format(col.name, doc['_key'])
+ result = col.delete(doc_id, return_old=True)
+ assert result['_id'] == '{}/{}'.format(col.name, doc['_key'])
+ assert result['_key'] == doc['_key']
assert isinstance(result['_rev'], string_types)
- assert result['sync'] is False
- assert result['old']['_key'] == doc3['_key']
- assert result['old']['val'] == 100
- assert doc3['_key'] not in col
- assert len(col) == 2
+ assert result['old']['_key'] == doc['_key']
+ assert result['old']['val'] == doc['val']
+ assert doc['_key'] not in col
+ assert len(col) == 4
- # Test delete (document key) with sync
- result = col.delete(doc4, sync=True)
- assert result['_id'] == '{}/{}'.format(col.name, doc4['_key'])
- assert result['_key'] == doc4['_key']
+ # Test delete (document) with sync set to True
+ doc = docs[2]
+ result = col.delete(doc, sync=True)
+ assert result['_id'] == '{}/{}'.format(col.name, doc['_key'])
+ assert result['_key'] == doc['_key']
assert isinstance(result['_rev'], string_types)
- assert result['sync'] is True
- assert doc4['_key'] not in col
- assert len(col) == 1
+ assert doc['_key'] not in col
+ assert len(col) == 3
- # Test delete (document) with check_rev
- rev = col[doc5['_key']]['_rev'] + '000'
- bad_doc = doc5.copy()
- bad_doc.update({'_rev': rev})
- with pytest.raises(ArangoError):
+ # Test delete (document) with check_rev set to True
+ doc = docs[3]
+ bad_rev = col[doc['_key']]['_rev'] + '0'
+ bad_doc = doc.copy()
+ bad_doc.update({'_rev': bad_rev})
+ with assert_raises(DocumentRevisionError):
col.delete(bad_doc, check_rev=True)
assert bad_doc['_key'] in col
- assert len(col) == 1
+ assert len(col) == 3
- bad_doc.update({'_rev': 'bad_rev'})
- with pytest.raises(ArangoError):
- col.delete(bad_doc, check_rev=True)
- assert bad_doc['_key'] in col
- assert len(col) == 1
+ # Test delete (document) with check_rev set to False
+ doc = docs[4]
+ bad_rev = col[doc['_key']]['_rev'] + '0'
+ bad_doc = doc.copy()
+ bad_doc.update({'_rev': bad_rev})
+ col.delete(bad_doc, check_rev=False)
+ assert doc['_key'] not in col
+ assert len(col) == 2
- # Test delete (document) with check_rev
- assert col.delete(doc4, ignore_missing=True) is False
- with pytest.raises(ArangoError):
- col.delete(doc4, ignore_missing=False)
- assert len(col) == 1
+ # Test delete missing document
+ bad_key = generate_doc_key()
+ with assert_raises(DocumentDeleteError) as err:
+ col.delete(bad_key, ignore_missing=False)
+ assert err.value.error_code == 1202
+ assert len(col) == 2
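+ # The boolean result is not available in transaction context, hence the guard.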
+ if col.context != 'transaction':
+ assert col.delete(bad_key, ignore_missing=True) is False
- # Test delete with missing collection
- with pytest.raises(ArangoError):
- bad_col.delete(doc5)
- with pytest.raises(ArangoError):
- bad_col.delete(doc5['_key'])
+ # Test delete (document) with silent set to True
+ doc = docs[5]
+ assert col.delete(doc, silent=True) is True
+ assert doc['_key'] not in col
+ assert len(col) == 1
- # Test delete with wrong user credentials
- with pytest.raises(ArangoError) as err:
- bad_db.collection(col_name).delete(doc5)
- assert isinstance(err.value, DocumentDeleteError) \
- or isinstance(err.value, AsyncExecuteError) \
- or isinstance(err.value, BatchExecuteError)
-def test_delete_many():
+def test_document_delete_many(col, bad_col, docs):
# Set up test documents
- current_revs = {}
- docs = [doc.copy() for doc in test_docs]
+ old_revs = {}
+ doc_keys = [d['_key'] for d in docs]
+ doc_ids = [col.name + '/' + d['_key'] for d in docs]
# Test delete_many (documents) with default options
col.import_bulk(docs)
results = col.delete_many(docs)
- for result, key in zip(results, test_doc_keys):
- assert result['_id'] == '{}/{}'.format(col.name, key)
- assert result['_key'] == key
+ for result, doc_key in zip(results, doc_keys):
+ assert result['_id'] == '{}/{}'.format(col.name, doc_key)
+ assert result['_key'] == doc_key
assert isinstance(result['_rev'], string_types)
- assert result['sync'] is False
assert 'old' not in result
- assert key not in col
- current_revs[key] = result['_rev']
+ assert doc_key not in col
+ old_revs[doc_key] = result['_rev']
assert len(col) == 0
- # Test delete_many (document keys) with default options
+ # Test delete_many (document IDs) with default options
col.import_bulk(docs)
- results = col.delete_many(docs)
- for result, key in zip(results, test_doc_keys):
- assert result['_id'] == '{}/{}'.format(col.name, key)
- assert result['_key'] == key
+ results = col.delete_many(doc_ids)
+ for result, doc_key in zip(results, doc_keys):
+ assert result['_id'] == '{}/{}'.format(col.name, doc_key)
+ assert result['_key'] == doc_key
assert isinstance(result['_rev'], string_types)
- assert result['sync'] is False
assert 'old' not in result
- assert key not in col
- current_revs[key] = result['_rev']
+ assert doc_key not in col
+ old_revs[doc_key] = result['_rev']
assert len(col) == 0
- # Test delete_many (documents) with return_old
+ # Test delete_many (documents) with return_old set to True
col.import_bulk(docs)
results = col.delete_many(docs, return_old=True)
for result, doc in zip(results, docs):
- key = doc['_key']
- assert result['_id'] == '{}/{}'.format(col.name, key)
- assert result['_key'] == key
+ doc_key = doc['_key']
+ assert result['_id'] == '{}/{}'.format(col.name, doc_key)
+ assert result['_key'] == doc_key
assert isinstance(result['_rev'], string_types)
- assert result['sync'] is False
- assert result['old']['_key'] == key
+ assert result['old']['_key'] == doc_key
assert result['old']['val'] == doc['val']
- assert key not in col
- current_revs[key] = result['_rev']
+ assert doc_key not in col
+ old_revs[doc_key] = result['_rev']
assert len(col) == 0
- # Test delete_many (document keys) with sync
+ # Test delete_many (documents) with sync set to True
col.import_bulk(docs)
results = col.delete_many(docs, sync=True)
for result, doc in zip(results, docs):
- key = doc['_key']
- assert result['_id'] == '{}/{}'.format(col.name, key)
- assert result['_key'] == key
+ doc_key = doc['_key']
+ assert result['_id'] == '{}/{}'.format(col.name, doc_key)
+ assert result['_key'] == doc_key
assert isinstance(result['_rev'], string_types)
- assert result['sync'] is True
assert 'old' not in result
- assert key not in col
- current_revs[key] = result['_rev']
+ assert doc_key not in col
+ old_revs[doc_key] = result['_rev']
+ assert len(col) == 0
+
+ # Test delete_many with silent set to True
+ col.import_bulk(docs)
+ assert col.delete_many(docs, silent=True) is True
assert len(col) == 0
- # Test delete_many (documents) with check_rev
+ # Test delete_many (documents) with check_rev set to True
col.import_bulk(docs)
for doc in docs:
- doc['_rev'] = current_revs[doc['_key']] + '000'
+ doc['_rev'] = old_revs[doc['_key']] + '0'
results = col.delete_many(docs, check_rev=True)
for result, doc in zip(results, docs):
assert isinstance(result, DocumentRevisionError)
- assert len(col) == 5
+ assert len(col) == 6
# Test delete_many (documents) with missing documents
col.truncate()
- results = col.delete_many([{'_key': '6'}, {'_key': '7'}])
+ results = col.delete_many([
+ {'_key': generate_doc_key()},
+ {'_key': generate_doc_key()},
+ {'_key': generate_doc_key()}
+ ])
for result, doc in zip(results, docs):
assert isinstance(result, DocumentDeleteError)
assert len(col) == 0
- # Test delete_many with missing collection
- with pytest.raises(DocumentDeleteError):
+ # Test delete_many with bad database
+ with assert_raises(DocumentDeleteError) as err:
bad_col.delete_many(docs)
+ assert err.value.error_code == 1228
- with pytest.raises(DocumentDeleteError):
- bad_col.delete_many(test_doc_keys)
-
-
-def test_delete_match():
- # Test preconditions
- assert col.delete_match({'val': 100}) == 0
+def test_document_delete_match(col, bad_col, docs):
# Set up test documents
- col.import_bulk(test_docs)
+ col.import_bulk(docs)
# Test delete matching documents with default options
- assert '4' in col
- assert col.delete_match({'val': 200}) == 1
- assert '4' not in col
+ doc = docs[0]
+ assert doc in col
+ assert col.delete_match(doc) == 1
+ assert doc not in col
# Test delete matching documents with sync
- assert '5' in col
- assert col.delete_match({'val': 300}, sync=True) == 1
- assert '5' not in col
+ doc = docs[1]
+ assert doc in col
+ assert col.delete_match(doc, sync=True) == 1
+ assert doc not in col
# Test delete matching documents with limit of 2
- assert col.delete_match({'val': 100}, limit=2) == 2
- assert [doc['val'] for doc in col].count(100) == 1
+ assert col.delete_match({'text': 'bar'}, limit=2) == 2
+ assert [d['text'] for d in col].count('bar') == 1
- with pytest.raises(DocumentDeleteError):
- bad_col.delete_match({'val': 100})
+ # Test delete matching documents with bad database
+ with assert_raises(DocumentDeleteError) as err:
+ bad_col.delete_match(doc)
+ assert err.value.error_code == 1228
-def test_count():
+def test_document_count(col, bad_col, docs):
# Set up test documents
- col.import_bulk(test_docs)
+ col.import_bulk(docs)
- assert len(col) == len(test_docs)
- assert col.count() == len(test_docs)
+ assert len(col) == len(docs)
+ assert col.count() == len(docs)
- with pytest.raises(DocumentCountError):
+ with assert_raises(DocumentCountError):
len(bad_col)
- with pytest.raises(DocumentCountError):
+ with assert_raises(DocumentCountError):
bad_col.count()
-def test_find():
+def test_document_find(col, bad_col, docs):
# Check preconditions
assert len(col) == 0
# Set up test documents
- col.import_bulk(test_docs)
+ col.import_bulk(docs)
# Test find (single match) with default options
- found = list(col.find({'val': 200}))
+ found = list(col.find({'val': 2}))
assert len(found) == 1
- assert found[0]['_key'] == '4'
+ assert found[0]['_key'] == '2'
# Test find (multiple matches) with default options
- found = list(col.find({'val': 100}))
- assert len(found) == 3
+ col.update_match({'val': 2}, {'val': 1})
+ found = list(col.find({'val': 1}))
+ assert len(found) == 2
for doc in map(dict, found):
- assert doc['_key'] in {'1', '2', '3'}
+ assert doc['_key'] in {'1', '2'}
assert doc['_key'] in col
- # Test find with offset
+ # Test find with skip
- found = list(col.find({'val': 100}, offset=1))
- assert len(found) == 2
+ found = list(col.find({'val': 1}, skip=1))
+ assert len(found) == 1
for doc in map(dict, found):
- assert doc['_key'] in {'1', '2', '3'}
+ assert doc['_key'] in {'1', '2'}
assert doc['_key'] in col
@@ -969,362 +1034,48 @@ def test_find():
found = list(col.find({}, limit=limit))
assert len(found) == limit
for doc in map(dict, found):
- assert doc['_key'] in {'1', '2', '3', '4', '5'}
+ assert doc['_key'] in extract('_key', docs)
assert doc['_key'] in col
# Test find in empty collection
col.truncate()
assert list(col.find({})) == []
- assert list(col.find({'val': 100})) == []
- assert list(col.find({'val': 200})) == []
- assert list(col.find({'val': 300})) == []
- assert list(col.find({'val': 400})) == []
-
- # Test find in missing collection
- with pytest.raises(DocumentGetError):
- bad_col.find({'val': 100})
-
-
-def test_has():
- # Set up test documents
- col.import_bulk(test_docs)
-
- # Test has existing document
- assert col.has('1') is True
-
- # Test has another existing document
- assert col.has('2') is True
-
- # Test has missing document
- assert col.has('6') is False
-
- # Test has with correct revision
- good_rev = col['5']['_rev']
- assert col.has('5', rev=good_rev) is True
-
- # Test has with invalid revision
- bad_rev = col['5']['_rev'] + '000'
- with pytest.raises(ArangoError):
- col.has('5', rev=bad_rev, match_rev=True)
-
- # Test has with correct revision and match_rev turned off
- # bad_rev = col['5']['_rev'] + '000'
- # assert col.has('5', rev=bad_rev, match_rev=False) is True
-
- with pytest.raises(DocumentInError):
- bad_col.has('1')
-
- with pytest.raises(DocumentInError):
- assert '1' in bad_col
-
-
-def test_get():
- # Set up test documents
- col.import_bulk(test_docs)
-
- # Test get existing document
- result = col.get('1')
- assert result['_key'] == '1'
- assert result['val'] == 100
-
- # Test get another existing document
- result = col.get('2')
- assert result['_key'] == '2'
- assert result['val'] == 100
-
- # Test get missing document
- assert col.get('6') is None
-
- # Test get with correct revision
- good_rev = col['5']['_rev']
- result = col.get('5', rev=good_rev)
- assert result['_key'] == '5'
- assert result['val'] == 300
-
- # Test get with invalid revision
- bad_rev = col['5']['_rev'] + '000'
- with pytest.raises(ArangoError):
- col.get('5', rev=bad_rev, match_rev=True)
- with pytest.raises(ArangoError):
- col.get('5', rev='bad_rev')
-
- # TODO uncomment once match_rev flag is fixed
- # # Test get with correct revision and match_rev turned off
- # bad_rev = col['5']['_rev'] + '000'
- # result = col.get('5', rev=bad_rev, match_rev=False)
- # assert result['_key'] == '5'
- # assert result['_rev'] != bad_rev
- # assert result['val'] == 300
-
- # Test get with missing collection
- with pytest.raises(DocumentGetError):
- _ = bad_col.get('1')
-
- with pytest.raises(DocumentGetError):
- _ = bad_col['1']
-
- with pytest.raises(DocumentGetError):
- iter(bad_col)
-
-
-def test_get_from_db():
- # Set up test documents
- col.import_bulk(test_docs)
-
- # Test get existing document
- result = db.get_document(col_name + '/1')
- assert result['_key'] == '1'
- assert result['val'] == 100
-
- # Test get another existing document
- result = db.get_document(col_name + '/2')
- assert result['_key'] == '2'
- assert result['val'] == 100
-
- # Test get missing document
- assert db.get_document(col_name + '/6') is None
-
- # Test get with correct revision
- good_rev = db.get_document(col_name + '/5')['_rev']
- result = db.get_document(col_name + '/5', rev=good_rev)
- assert result['_key'] == '5'
- assert result['val'] == 300
-
- # Test get with invalid revision
- bad_rev = db.get_document(col_name + '/5')['_rev'] + '000'
- with pytest.raises(ArangoError):
- db.get_document(col_name + '/5', rev=bad_rev, match_rev=True)
- with pytest.raises(ArangoError):
- db.get_document(col_name + '/5', rev="bad_rev")
-
- # Test get with missing collection
- with pytest.raises(DocumentGetError):
- _ = db.get_document(bad_col_name + '/1')
-
-
-def test_get_many():
- # Test precondition
- assert len(col) == 0
-
- # Set up test documents
- col.import_bulk(test_docs)
-
- # Test get_many missing documents
- assert col.get_many(['6']) == []
- assert col.get_many(['6', '7']) == []
- assert col.get_many(['6', '7', '8']) == []
-
- # Test get_many existing documents
- result = col.get_many(['1'])
- result = clean_keys(result)
- assert result == [doc1]
-
- result = col.get_many(['2'])
- result = clean_keys(result)
- assert result == [doc2]
-
- result = col.get_many(['3', '4'])
- assert clean_keys(result) == [doc3, doc4]
-
- result = col.get_many(['1', '3', '6'])
- assert clean_keys(result) == [doc1, doc3]
-
- # Test get_many in empty collection
- col.truncate()
- assert col.get_many([]) == []
- assert col.get_many(['1']) == []
- assert col.get_many(['2', '3']) == []
- assert col.get_many(['2', '3', '4']) == []
-
- with pytest.raises(DocumentGetError):
- bad_col.get_many(['2', '3', '4'])
-
-
-def test_all():
- # Check preconditions
- assert len(list(col.export())) == 0
-
- # Set up test documents
- col.import_bulk(test_docs)
-
- # Test all with default options
- result = list(col.all())
- assert ordered(clean_keys(result)) == test_docs
-
- # Test all with a skip of 0
- result = col.all(skip=0)
- assert result.count() == len(test_docs)
- assert ordered(clean_keys(result)) == test_docs
-
- # Test all with a skip of 1
- result = col.all(skip=1)
- assert result.count() == 4
- assert len(list(result)) == 4
- for doc in list(clean_keys(result)):
- assert doc in test_docs
-
- # Test all with a skip of 3
- result = col.all(skip=3)
- assert result.count() == 2
- assert len(list(result)) == 2
- for doc in list(clean_keys(list(result))):
- assert doc in test_docs
-
- # Test all with a limit of 0
- result = col.all(limit=0)
- assert result.count() == 0
- assert ordered(clean_keys(result)) == []
-
- # Test all with a limit of 1
- result = col.all(limit=1)
- assert result.count() == 1
- assert len(list(result)) == 1
- for doc in list(clean_keys(result)):
- assert doc in test_docs
-
- # Test all with a limit of 3
- result = col.all(limit=3)
- assert result.count() == 3
- assert len(list(result)) == 3
- for doc in list(clean_keys(list(result))):
- assert doc in test_docs
-
- # Test all with skip and limit
- result = col.all(skip=4, limit=2)
- assert result.count() == 1
- assert len(list(result)) == 1
- for doc in list(clean_keys(list(result))):
- assert doc in test_docs
-
- # Test export in missing collection
- with pytest.raises(DocumentGetError):
- bad_col.all()
-
-# TODO uncomment when export with flush works properly
-# def test_export():
-# # Check preconditions
-# assert len(list(col.export())) == 0
-#
-# # Set up test documents
-# col.import_bulk(test_docs)
-#
-# # Test export with default options
-# result = list(col.export())
-# assert ordered(clean_keys(result)) == test_docs
-#
-# # Test export with flush
-# # result = list(col.export(flush=True, flush_wait=1))
-# # assert ordered(clean_keys(result)) == test_docs
-#
-# # Test export with count
-# result = col.export(count=True)
-# assert result.count() == len(test_docs)
-# assert ordered(clean_keys(result)) == test_docs
-#
-# # Test export with batch size
-# result = col.export(count=True, batch_size=1)
-# assert result.count() == len(test_docs)
-# assert ordered(clean_keys(result)) == test_docs
-#
-# # Test export with time-to-live
-# result = col.export(count=True, ttl=1000)
-# assert result.count() == len(test_docs)
-# assert ordered(clean_keys(result)) == test_docs
-#
-# # Test export with filters
-# result = col.export(
-# count=True,
-# filter_fields=['text'],
-# filter_type='exclude'
-# )
-# assert result.count() == 5
-# for doc in result:
-# assert 'text' not in doc
-#
-# # Test export with a limit of 0
-# result = col.export(count=True, limit=0)
-# assert result.count() == len(test_docs)
-# assert ordered(clean_keys(result)) == test_docs
-#
-# # Test export with a limit of 1
-# result = col.export(count=True, limit=1)
-# assert result.count() == 1
-# assert len(list(result)) == 1
-# for doc in list(clean_keys(result)):
-# assert doc in test_docs
-#
-# # Test export with a limit of 3
-# result = col.export(count=True, limit=3)
-# assert result.count() == 3
-# assert len(list(result)) == 3
-# for doc in list(clean_keys(list(result))):
-# assert doc in test_docs
-#
-# # Test export in missing collection
-# with pytest.raises(DocumentGetError):
-# bad_col.export()
-#
-# # Test closing export cursor
-# result = col.export(count=True, batch_size=1)
-# assert result.close(ignore_missing=False) is True
-# assert result.close(ignore_missing=True) is False
-#
-# assert clean_keys(result.next()) == doc1
-# with pytest.raises(CursorNextError):
-# result.next()
-# with pytest.raises(CursorCloseError):
-# result.close(ignore_missing=False)
-#
-# result = col.export(count=True)
-# assert result.close(ignore_missing=True) is False
-
-
-def test_random():
- # Set up test documents
- col.import_bulk(test_docs)
-
- # Test random in non-empty collection
- for attempt in range(10):
- random_doc = col.random()
- assert clean_keys(random_doc) in test_docs
-
- # Test random in empty collection
- col.truncate()
- for attempt in range(10):
- random_doc = col.random()
- assert random_doc is None
+ assert list(col.find({'val': 1})) == []
+ assert list(col.find({'val': 2})) == []
+ assert list(col.find({'val': 3})) == []
+ assert list(col.find({'val': 4})) == []
- # Test random in missing collection
- with pytest.raises(DocumentGetError):
- bad_col.random()
+ # Test find with bad database
+ with assert_raises(DocumentGetError) as err:
+ bad_col.find({'val': 1})
+ assert err.value.error_code == 1228
-def test_find_near():
- # Set up test documents
- col.import_bulk(test_docs)
+def test_document_find_near(col, bad_col, docs):
+ col.import_bulk(docs)
# Test find_near with default options
result = col.find_near(latitude=1, longitude=1)
- assert [doc['_key'] for doc in result] == ['1', '2', '3', '4', '5']
+ assert extract('_key', result) == ['1', '2', '3', '4', '5', '6']
# Test find_near with limit of 0
result = col.find_near(latitude=1, longitude=1, limit=0)
- assert [doc['_key'] for doc in result] == []
+ assert extract('_key', result) == ['1', '2', '3', '4', '5', '6']
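+ # A limit of 0 means "no limit", so all six documents are returned.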
# Test find_near with limit of 1
result = col.find_near(latitude=1, longitude=1, limit=1)
- assert [doc['_key'] for doc in result] == ['1']
+ assert extract('_key', result) == ['1']
# Test find_near with limit of 3
result = col.find_near(latitude=1, longitude=1, limit=3)
- assert [doc['_key'] for doc in result] == ['1', '2', '3']
+ assert extract('_key', result) == ['1', '2', '3']
# Test find_near with limit of 3 (another set of coordinates)
result = col.find_near(latitude=5, longitude=5, limit=3)
- assert [doc['_key'] for doc in result] == ['5', '4', '3']
+ assert extract('_key', result) == ['4', '5', '6']
- # Test random in missing collection
- with pytest.raises(DocumentGetError):
+ # Test find_near with bad collection
+ with assert_raises(DocumentGetError):
bad_col.find_near(latitude=1, longitude=1, limit=1)
# Test find_near in an empty collection
@@ -1334,75 +1085,84 @@ def test_find_near():
result = col.find_near(latitude=5, longitude=5, limit=4)
assert list(result) == []
- # Test find near in missing collection
- with pytest.raises(DocumentGetError):
+ # Test find near with bad collection
+ with assert_raises(DocumentGetError) as err:
bad_col.find_near(latitude=1, longitude=1, limit=1)
+ assert err.value.error_code == 1228
-def test_find_in_range():
- # Set up required index
- col.add_skiplist_index(['val'])
-
- # Set up test documents
- col.import_bulk(test_docs)
+def test_document_find_in_range(col, bad_col, docs):
+ col.import_bulk(docs)
# Test find_in_range with default options
- result = col.find_in_range(field='val', lower=100, upper=200)
- assert [doc['_key'] for doc in result] == ['1', '2', '3', '4']
+ result = col.find_in_range('val', lower=1, upper=2)
+ assert extract('_key', result) == ['1']
# Test find_in_range with limit of 0
- result = col.find_in_range(field='val', lower=100, upper=200, limit=0)
- assert [doc['_key'] for doc in result] == []
+ result = col.find_in_range('val', lower=1, upper=2, limit=0)
+ assert extract('_key', result) == ['1']
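+ # As with find_near, a limit of 0 imposes no limit.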
# Test find_in_range with limit of 3
- result = col.find_in_range(field='val', lower=100, upper=200, limit=3)
- assert [doc['_key'] for doc in result] == ['1', '2', '3']
-
- # Test find_in_range with offset of 0
- result = col.find_in_range(field='val', lower=100, upper=200, offset=0)
- assert [doc['_key'] for doc in result] == ['1', '2', '3', '4']
-
- # Test find_in_range with offset of 2
- result = col.find_in_range(field='val', lower=100, upper=200, offset=2)
- assert [doc['_key'] for doc in result] == ['3', '4']
-
- # Test find_in_range without inclusive
- result = col.find_in_range('val', 100, 200, inclusive=False)
- assert [doc['_key'] for doc in result] == []
-
- # Test find_in_range without inclusive
- result = col.find_in_range('val', 100, 300, inclusive=False)
- assert [doc['_key'] for doc in result] == ['4']
-
- # Test find_in_range in missing collection
- with pytest.raises(DocumentGetError):
- bad_col.find_in_range(field='val', lower=100, upper=200, offset=2)
-
-
-# TODO the WITHIN geo function does not seem to work properly
-def test_find_in_radius():
- col.import_bulk([
- {'_key': '1', 'coordinates': [1, 1]},
- {'_key': '2', 'coordinates': [1, 4]},
- {'_key': '3', 'coordinates': [4, 1]},
- {'_key': '4', 'coordinates': [4, 4]},
- ])
- result = list(col.find_in_radius(3, 3, 10, 'distance'))
- for doc in result:
- assert 'distance' in doc
-
- # Test find_in_radius in missing collection
- with pytest.raises(DocumentGetError):
- bad_col.find_in_radius(3, 3, 10, 'distance')
-
-
-def test_find_in_box():
- # Set up test documents
- d1 = {'_key': '1', 'coordinates': [1, 1]}
- d2 = {'_key': '2', 'coordinates': [1, 5]}
- d3 = {'_key': '3', 'coordinates': [5, 1]}
- d4 = {'_key': '4', 'coordinates': [5, 5]}
- col.import_bulk([d1, d2, d3, d4])
+ result = col.find_in_range('val', lower=1, upper=5, limit=3)
+ assert extract('_key', result) == ['1', '2', '3']
+
+ # Test find_in_range with skip set to 0
+ result = col.find_in_range('val', lower=1, upper=5, skip=0)
+ assert extract('_key', result) == ['1', '2', '3', '4']
+
+ # Test find_in_range with skip set to 3
+ result = col.find_in_range('val', lower=1, upper=5, skip=2)
+ assert extract('_key', result) == ['3', '4']
+
+ # Test find_in_range with bad collection
+ with assert_raises(DocumentGetError) as err:
+ bad_col.find_in_range(field='val', lower=1, upper=2, skip=2)
+ assert err.value.error_code == 1228
+
+
+def test_document_find_in_radius(col, bad_col):
+ doc1 = {'_key': '1', 'loc': [1, 1]}
+ doc2 = {'_key': '2', 'loc': [1, 4]}
+ doc3 = {'_key': '3', 'loc': [4, 1]}
+ doc4 = {'_key': '4', 'loc': [4, 4]}
+
+ col.import_bulk([doc1, doc2, doc3, doc4])
+
+ # Test find_in_radius without distance field
+ result = list(col.find_in_radius(
+ latitude=1,
+ longitude=4,
+ radius=6,
+ ))
+ assert len(result) == 1
+ assert clean_doc(result[0]) == {'_key': '2', 'loc': [1, 4]}
+
+ # Test find_in_radius with distance field
+ result = list(col.find_in_radius(
+ latitude=1,
+ longitude=1,
+ radius=6,
+ distance_field='dist'
+ ))
+ assert len(result) == 1
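+ # In transaction context the distance field is not added to the result.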
+ if col.context == 'transaction':
+ assert clean_doc(result[0]) == {'_key': '1', 'loc': [1, 1]}
+ else:
+ assert clean_doc(result[0]) == {'_key': '1', 'loc': [1, 1], 'dist': 0}
+
+ # Test find_in_radius with bad collection
+ with assert_raises(DocumentGetError) as err:
+ bad_col.find_in_radius(3, 3, 10)
+ assert err.value.error_code == 1228
+
+
+def test_document_find_in_box(col, bad_col, geo):
+ doc1 = {'_key': '1', 'loc': [1, 1]}
+ doc2 = {'_key': '2', 'loc': [1, 5]}
+ doc3 = {'_key': '3', 'loc': [5, 1]}
+ doc4 = {'_key': '4', 'loc': [5, 5]}
+
+ col.import_bulk([doc1, doc2, doc3, doc4])
# Test find_in_box with default options
result = col.find_in_box(
@@ -1410,9 +1170,9 @@ def test_find_in_box():
longitude1=0,
latitude2=6,
longitude2=3,
- geo_field=geo_index['id']
+ index=geo['id']
)
- assert clean_keys(result) == [d3, d1]
+ assert clean_doc(result) == [doc1, doc3]
# Test find_in_box with limit of 0
result = col.find_in_box(
@@ -1421,9 +1181,9 @@ def test_find_in_box():
latitude2=6,
longitude2=3,
limit=0,
- geo_field=geo_index['id']
+ index=geo['id']
)
- assert clean_keys(result) == [d3, d1]
+ assert clean_doc(result) == [doc1, doc3]
# Test find_in_box with limit of 1
result = col.find_in_box(
@@ -1433,7 +1193,7 @@ def test_find_in_box():
longitude2=3,
limit=1,
)
- assert clean_keys(result) == [d3]
+ assert clean_doc(result) == [doc3]
# Test find_in_box with limit of 4
result = col.find_in_box(
@@ -1443,7 +1203,7 @@ def test_find_in_box():
longitude2=10,
limit=4
)
- assert clean_keys(result) == [d4, d3, d2, d1]
+ assert clean_doc(result) == [doc1, doc2, doc3, doc4]
# Test find_in_box with skip 1
result = col.find_in_box(
@@ -1453,7 +1213,7 @@ def test_find_in_box():
longitude2=3,
skip=1,
)
- assert clean_keys(result) == [d1]
+ assert clean_doc(result) == [doc1]
# Test find_in_box with skip 3
result = col.find_in_box(
@@ -1463,174 +1223,794 @@ def test_find_in_box():
longitude2=10,
skip=2
)
- assert clean_keys(result) == [d2, d1]
+ assert clean_doc(result) == [doc1, doc2]
- # Test find_in_box in missing collection
- with pytest.raises(DocumentGetError):
+ # Test find_in_box with bad collection
+ with assert_raises(DocumentGetError) as err:
bad_col.find_in_box(
latitude1=0,
longitude1=0,
latitude2=6,
longitude2=3,
)
+ assert err.value.error_code == 1228
-def test_find_by_text():
- # Set up required index
- col.add_fulltext_index(['text'])
-
- # Set up test documents
- col.import_bulk(test_docs)
+def test_document_find_by_text(col, docs):
+ col.import_bulk(docs)
# Test find_by_text with default options
- result = col.find_by_text(key='text', query='bar,|baz')
- assert clean_keys(list(result)) == [doc2, doc3]
+ result = col.find_by_text(field='text', query='foo,|bar')
+ assert clean_doc(result) == docs
# Test find_by_text with limit
- result = col.find_by_text(key='text', query='foo', limit=1)
+ result = col.find_by_text(field='text', query='foo', limit=1)
assert len(list(result)) == 1
- result = col.find_by_text(key='text', query='foo', limit=2)
+
+ result = col.find_by_text(field='text', query='foo', limit=2)
assert len(list(result)) == 2
- result = col.find_by_text(key='text', query='foo', limit=3)
+
+ result = col.find_by_text(field='text', query='foo', limit=3)
assert len(list(result)) == 3
# Test find_by_text with invalid queries
- with pytest.raises(DocumentGetError):
- col.find_by_text(key='text', query='+')
- with pytest.raises(DocumentGetError):
- col.find_by_text(key='text', query='|')
+ with assert_raises(DocumentGetError):
+ col.find_by_text(field='text', query='+')
+ with assert_raises(DocumentGetError):
+ col.find_by_text(field='text', query='|')
- # Test find_by_text with missing column
+ # Test find_by_text with missing field
- with pytest.raises(DocumentGetError):
- col.find_by_text(key='missing', query='foo')
+ with assert_raises(DocumentGetError) as err:
+ col.find_by_text(field='missing', query='foo')
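+ # 1571 is ArangoDB's "no suitable fulltext index" error code.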
+ assert err.value.error_code == 1571
+
+
+def test_document_has(col, bad_col, docs):
+ # Set up test document
+ result = col.insert(docs[0])
+ rev = result['_rev']
+ bad_rev = rev + '0'
+
+ doc_key = docs[0]['_key']
+ doc_id = col.name + '/' + doc_key
+ missing_doc_key = docs[1]['_key']
+ missing_doc_id = col.name + '/' + missing_doc_key
+
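+ # has() accepts a key, an ID, or a document body with "_key" or "_id".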
+ # Test existing documents without revision or with good revision
+ for doc_input in [
+ doc_key,
+ doc_id,
+ {'_key': doc_key},
+ {'_id': doc_id},
+ {'_id': doc_id, '_key': doc_key},
+ {'_key': doc_key, '_rev': rev},
+ {'_id': doc_id, '_rev': rev},
+ {'_id': doc_id, '_key': doc_key, '_rev': rev},
+ ]:
+ assert doc_input in col
+ assert col.has(doc_input) is True
+ assert col.has(doc_input, rev=rev) is True
+ assert col.has(doc_input, rev=rev, check_rev=True) is True
+ assert col.has(doc_input, rev=rev, check_rev=False) is True
+ assert col.has(doc_input, rev=bad_rev, check_rev=False) is True
+
+ with assert_raises(DocumentRevisionError) as err:
+ col.has(doc_input, rev=bad_rev, check_rev=True)
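+ # 1200 is ArangoDB's revision conflict error code.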
+ assert err.value.error_code == 1200
+
+ # Test existing documents with bad revision
+ for doc_input in [
+ {'_key': doc_key, '_rev': bad_rev},
+ {'_id': doc_id, '_rev': bad_rev},
+ {'_id': doc_id, '_key': doc_key, '_rev': bad_rev},
+ ]:
+ with assert_raises(DocumentRevisionError) as err:
+ col.has(doc_input)
+ assert err.value.error_code == 1200
+
+ with assert_raises(DocumentRevisionError) as err:
+ col.has(doc_input, rev=bad_rev)
+ assert err.value.error_code == 1200
+
+ with assert_raises(DocumentRevisionError) as err:
+ col.has(doc_input, rev=bad_rev, check_rev=True)
+ assert err.value.error_code == 1200
+
+ assert doc_input in col
+ assert col.has(doc_input, rev=rev, check_rev=True) is True
+ assert col.has(doc_input, rev=rev, check_rev=False) is True
+ assert col.has(doc_input, rev=bad_rev, check_rev=False) is True
+
+ # Test missing documents
+ for doc_input in [
+ missing_doc_key,
+ missing_doc_id,
+ {'_key': missing_doc_key},
+ {'_id': missing_doc_id},
+ {'_id': missing_doc_id, '_key': missing_doc_key},
+ {'_key': missing_doc_key, '_rev': rev},
+ {'_id': missing_doc_id, '_rev': rev},
+ {'_id': missing_doc_id, '_key': missing_doc_key, '_rev': rev},
+ ]:
+ assert doc_input not in col
+ assert col.has(doc_input) is False
+ assert col.has(doc_input, rev=rev) is False
+ assert col.has(doc_input, rev=rev, check_rev=True) is False
+ assert col.has(doc_input, rev=rev, check_rev=False) is False
+
+ # Test documents with IDs with wrong collection name
+ expected_error_msg = 'bad collection name'
+ bad_id = generate_col_name() + '/' + doc_key
+ for doc_input in [
+ bad_id,
+ {'_id': bad_id},
+ {'_id': bad_id, '_rev': rev},
+ {'_id': bad_id, '_rev': bad_rev},
+ {'_id': bad_id, '_key': doc_key},
+ {'_id': bad_id, '_key': doc_key, '_rev': rev},
+ {'_id': bad_id, '_key': doc_key, '_rev': bad_rev},
+ ]:
+ with assert_raises(DocumentParseError) as err:
+ col.has(doc_input, check_rev=True)
+ assert expected_error_msg in str(err.value)
+
+ with assert_raises(DocumentParseError) as err:
+ col.has(doc_input, check_rev=False)
+ assert expected_error_msg in str(err.value)
+
+ with assert_raises(DocumentParseError) as err:
+ col.has(doc_input, rev=rev, check_rev=True)
+ assert expected_error_msg in str(err.value)
+
+ with assert_raises(DocumentParseError) as err:
+ col.has(doc_input, rev=rev, check_rev=False)
+ assert expected_error_msg in str(err.value)
+
+ # Test documents with missing "_id" and "_key" fields
+ expected_error_msg = 'field "_key" or "_id" required'
+ for doc_input in [
+ {},
+ {'foo': 'bar'},
+ {'foo': 'bar', '_rev': rev},
+ {'foo': 'bar', '_rev': bad_rev},
+ ]:
+ with assert_raises(DocumentParseError) as err:
+ col.has(doc_input, check_rev=True)
+ assert str(err.value) == expected_error_msg
+
+ with assert_raises(DocumentParseError) as err:
+ col.has(doc_input, check_rev=False)
+ assert str(err.value) == expected_error_msg
+
+ with assert_raises(DocumentParseError) as err:
+ col.has(doc_input, rev=rev, check_rev=True)
+ assert str(err.value) == expected_error_msg
+
+ with assert_raises(DocumentParseError) as err:
+ col.has(doc_input, rev=rev, check_rev=False)
+ assert str(err.value) == expected_error_msg
+
+ # Test get with bad database
+ with assert_raises(DocumentInError) as err:
+ bad_col.has(doc_key)
+ assert err.value.error_code == 1228
+
+ # Test contains with bad database
+ with assert_raises(DocumentInError) as err:
+ assert doc_key in bad_col
+ assert err.value.error_code == 1228
+
+
+def test_document_get(col, bad_col, docs):
+ # Set up test documents
+ col.import_bulk(docs)
+ doc = docs[0]
+ doc_val = doc['val']
+ doc_key = doc['_key']
+ doc_id = '{}/{}'.format(col.name, doc_key)
+
+ # Test get existing document by body
+ result = col.get(doc)
+ assert result['_key'] == doc_key
+ assert result['val'] == doc_val
+
+ # Test get existing document by ID
+ result = col.get(doc_id)
+ assert result['_key'] == doc_key
+ assert result['val'] == doc_val
+
+ # Test get existing document by key
+ result = col.get(doc_key)
+ assert result['_key'] == doc_key
+ assert result['val'] == doc_val
+
+ # Test get missing document
+ assert col.get(generate_doc_key()) is None
+
+ # Test get with correct revision
+ good_rev = col[doc_key]['_rev']
+ result = col.get(doc, rev=good_rev)
+ assert result['_key'] == doc_key
+ assert result['val'] == doc_val
+
+ # Test get with invalid revision
+ bad_rev = col[doc_key]['_rev'] + '0'
+ with assert_raises(DocumentRevisionError) as err:
+ col.get(doc_key, rev=bad_rev, check_rev=True)
+ assert err.value.error_code == 1200
+
+ # Test get with correct revision and check_rev turned off
+ result = col.get(doc, rev=bad_rev, check_rev=False)
+ assert result['_key'] == doc_key
+ assert result['_rev'] != bad_rev
+ assert result['val'] == doc_val
+
+ # Test get with bad database
+ with assert_raises(DocumentGetError) as err:
+ bad_col.get(doc['_key'])
+ assert err.value.error_code == 1228
+
+ # Test getitem with bad database
+ with assert_raises(DocumentGetError) as err:
+ assert bad_col[doc['_key']]
+ assert err.value.error_code == 1228
+
+
+def test_document_get_many(col, bad_col, docs):
+ # Set up test documents
+ col.import_bulk(docs)
+
+ # Test get_many missing documents
+ assert col.get_many([generate_doc_key()]) == []
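+ # Missing documents are silently skipped rather than raising an error.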
+
+ # Test get_many existing documents
+ result = col.get_many(docs[:1])
+ result = clean_doc(result)
+ assert result == docs[:1]
+
+ result = col.get_many(docs)
+ assert clean_doc(result) == docs
+
+ # Test get_many in empty collection
+ col.truncate()
+ assert col.get_many([]) == []
+ assert col.get_many(docs[:1]) == []
+ assert col.get_many(docs[:3]) == []
+
+ with assert_raises(DocumentGetError) as err:
+ bad_col.get_many(docs)
+ assert err.value.error_code == 1228
+
+
+def test_document_all(col, bad_col, docs):
+ # Set up test documents
+ col.import_bulk(docs)
+
+ # Test all with default options
+ cursor = col.all()
+ result = list(cursor)
+ assert clean_doc(result) == docs
+
+ # Test all with a skip of 0
+ cursor = col.all(skip=0)
+ result = list(cursor)
+ assert cursor.count() == len(docs)
+ assert clean_doc(result) == docs
+
+ # Test all with a skip of 1
+ cursor = col.all(skip=1)
+ result = list(cursor)
+ assert cursor.count() == len(result) == 5
+ assert all([clean_doc(d) in docs for d in result])
+
+ # Test all with a skip of 3
+ cursor = col.all(skip=3)
+ result = list(cursor)
+ assert cursor.count() == len(result) == 3
+ assert all([clean_doc(d) in docs for d in result])
+
+ # Test all with a limit of 0 (no limit)
+ cursor = col.all(limit=0)
+ result = list(cursor)
+ assert cursor.count() == len(result) == 6
+
+ # Test all with a limit of 1
+ cursor = col.all(limit=1)
+ result = list(cursor)
+ assert cursor.count() == len(result) == 1
+ assert all([clean_doc(d) in docs for d in result])
+
+ # Test all with a limit of 3
+ cursor = col.all(limit=3)
+ result = list(cursor)
+ assert cursor.count() == len(result) == 3
+ assert all([clean_doc(d) in docs for d in result])
+
+ # Test all with skip and limit
+ cursor = col.all(skip=5, limit=2)
+ result = list(cursor)
+ assert cursor.count() == len(result) == 1
+ assert all([clean_doc(d) in docs for d in result])
+
+ # Test all with bad database
+ with assert_raises(DocumentGetError) as err:
+ bad_col.all()
+ assert err.value.error_code == 1228
+
+
+def test_document_ids(col, bad_col, docs):
+ cursor = col.ids()
+ result = list(cursor)
+ assert result == []
+
+ col.import_bulk(docs)
+ cursor = col.ids()
+ result = list(cursor)
+ ids = set('{}/{}'.format(col.name, d['_key']) for d in docs)
+ assert set(result) == ids
+
+ # Test ids with bad database
+ with assert_raises(DocumentIDsError) as err:
+ bad_col.ids()
+ assert err.value.error_code == 1228
+
+
+def test_document_keys(col, bad_col, docs):
+ cursor = col.keys()
+ result = list(cursor)
+ assert result == []
+
+ col.import_bulk(docs)
+ cursor = col.keys()
+ result = list(cursor)
+ assert len(result) == len(docs)
+ assert sorted(result) == extract('_key', docs)
+
+ # Test keys with bad database
+ with assert_raises(DocumentKeysError) as err:
+ bad_col.keys()
+ assert err.value.error_code == 1228
+
+
+# def test_document_export(col, bad_col, docs):
+# # Set up test documents
+# col.insert_many(docs)
+#
+# # Test export with flush set to True and flush_wait set to 1
+# cursor = col.export(flush=True, flush_wait=1)
+# assert clean_doc(cursor) == docs
+# assert cursor.type == 'export'
+#
+# # Test export with count
+# cursor = col.export(flush=False, count=True)
+# assert cursor.count == len(docs)
+# assert clean_doc(cursor) == docs
+#
+# # Test export with batch size
+# cursor = col.export(flush=False, count=True, batch_size=1)
+# assert cursor.count == len(docs)
+# assert clean_doc(cursor) == docs
+#
+# # Test export with time-to-live
+# cursor = col.export(flush=False, count=True, ttl=10)
+# assert cursor.count == len(docs)
+# assert clean_doc(cursor) == docs
+#
+# # Test export with filters
+# cursor = col.export(
+# count=True,
+# flush=False,
+# filter_fields=['text'],
+# filter_type='exclude'
+# )
+# assert cursor.count == len(docs)
+# assert all(['text' not in d for d in cursor])
+#
+# # Test export with a limit of 0
+# cursor = col.export(flush=False, count=True, limit=0)
+# assert cursor.count == len(docs)
+# assert clean_doc(cursor) == docs
+#
+# # Test export with a limit of 1
+# cursor = col.export(flush=False, count=True, limit=1)
+# assert cursor.count == 1
+# assert len(list(cursor)) == 1
+# all([clean_doc(d) in docs for d in cursor])
+#
+# # Test export with a limit of 3
+# cursor = col.export(flush=False, count=True, limit=3)
+# assert cursor.count == 3
+# assert len(list(cursor)) == 3
+# all([clean_doc(d) in docs for d in cursor])
+#
+# # Test export with bad database
+# with assert_raises(DocumentGetError):
+# bad_col.export()
+#
+# # Test closing export cursor
+# cursor = col.export(flush=False, count=True, batch_size=1)
+# assert cursor.close(ignore_missing=False) is True
+# assert cursor.close(ignore_missing=True) is False
+#
+# assert clean_doc(cursor.next()) in docs
+# with assert_raises(CursorNextError):
+# cursor.next()
+# with assert_raises(CursorCloseError):
+# cursor.close(ignore_missing=False)
+#
+# cursor = col.export(flush=False, count=True)
+# assert cursor.close(ignore_missing=True) is False
+
+
+def test_document_random(col, bad_col, docs):
+ # Set up test documents
+ col.import_bulk(docs)
+
+ # Test random in non-empty collection
+ for attempt in range(10):
+ random_doc = col.random()
+ assert clean_doc(random_doc) in docs
+
+ # Test random in empty collection
+ col.truncate()
+ for attempt in range(10):
+ random_doc = col.random()
+ assert random_doc is None
+
+ # Test random with bad database
+ with assert_raises(DocumentGetError) as err:
+ bad_col.random()
+ assert err.value.error_code == 1228
-def test_import_bulk():
+def test_document_import_bulk(col, bad_col, docs):
# Test import_bulk with default options
- result = col.import_bulk(test_docs)
- assert result['created'] == 5
+ result = col.import_bulk(docs)
+ assert result['created'] == len(docs)
assert result['errors'] == 0
assert result['empty'] == 0
assert result['updated'] == 0
assert result['ignored'] == 0
assert 'details' in result
- assert len(col) == 5
- for doc in test_docs:
- key = doc['_key']
- assert key in col
- assert col[key]['_key'] == key
- assert col[key]['val'] == doc['val']
- assert col[key]['coordinates'] == doc['coordinates']
+ for doc in docs:
+ doc_key = doc['_key']
+ assert doc_key in col
+ assert col[doc_key]['_key'] == doc_key
+ assert col[doc_key]['val'] == doc['val']
+ assert col[doc_key]['loc'] == doc['loc']
col.truncate()
# Test import bulk without details and with sync
- result = col.import_bulk(test_docs, details=False, sync=True)
- assert result['created'] == 5
+ result = col.import_bulk(docs, details=False, sync=True)
+ assert result['created'] == len(docs)
assert result['errors'] == 0
assert result['empty'] == 0
assert result['updated'] == 0
assert result['ignored'] == 0
assert 'details' not in result
- assert len(col) == 5
- for doc in test_docs:
- key = doc['_key']
- assert key in col
- assert col[key]['_key'] == key
- assert col[key]['val'] == doc['val']
- assert col[key]['coordinates'] == doc['coordinates']
- col.truncate()
+ for doc in docs:
+ doc_key = doc['_key']
+ assert doc_key in col
+ assert col[doc_key]['_key'] == doc_key
+ assert col[doc_key]['val'] == doc['val']
+ assert col[doc_key]['loc'] == doc['loc']
# Test import_bulk duplicates with halt_on_error
- with pytest.raises(DocumentInsertError):
- col.import_bulk([doc1, doc1], halt_on_error=True)
- assert len(col) == 0
+ with assert_raises(DocumentInsertError):
+ col.import_bulk(docs, halt_on_error=True)
# Test import bulk duplicates without halt_on_error
- result = col.import_bulk([doc2, doc2], halt_on_error=False)
- assert result['created'] == 1
- assert result['errors'] == 1
+ result = col.import_bulk(docs, halt_on_error=False)
+ assert result['created'] == 0
+ assert result['errors'] == len(docs)
assert result['empty'] == 0
assert result['updated'] == 0
assert result['ignored'] == 0
- assert len(col) == 1
+ col.truncate()
- # Test import bulk in missing collection
- with pytest.raises(DocumentInsertError):
- bad_col.import_bulk([doc3, doc4], halt_on_error=True)
- assert len(col) == 1
+ # Test import bulk with bad database
+ with assert_raises(DocumentInsertError):
+ bad_col.import_bulk(docs, halt_on_error=True)
+ assert len(col) == 0
# Test import bulk with overwrite
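+ # (overwrite=True clears the collection before importing the new batch.)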
- result = col.import_bulk([doc3, doc4], overwrite=True)
- assert result['created'] == 2
+ result = col.import_bulk(docs, overwrite=True)
+ assert result['created'] == len(docs)
assert result['errors'] == 0
assert result['empty'] == 0
assert result['updated'] == 0
assert result['ignored'] == 0
- assert '1' not in col
- assert '2' not in col
- assert '3' in col
- assert '4' in col
+ for doc in docs:
+ doc_key = doc['_key']
+ assert doc_key in col
+ assert col[doc_key]['_key'] == doc_key
+ assert col[doc_key]['val'] == doc['val']
+ assert col[doc_key]['loc'] == doc['loc']
col.truncate()
- # Test import bulk to_prefix and from_prefix
- result = edge_col.import_bulk(
- test_edges, from_prefix='foo', to_prefix='bar'
- )
- assert result['created'] == 5
- assert result['errors'] == 0
- assert result['empty'] == 0
- assert result['updated'] == 0
- assert result['ignored'] == 0
- for edge in test_edges:
- key = edge['_key']
- assert key in edge_col
- assert edge_col[key]['_from'] == 'foo/' + edge['_from']
- assert edge_col[key]['_to'] == 'bar/' + edge['_to']
- edge_col.truncate()
-
# Test import bulk on_duplicate actions
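+ # on_duplicate controls how documents with existing keys are handled:
+ # "error" counts them as errors, "ignore" skips them, "update" merges the
+ # new fields into the old document, and "replace" swaps it out entirely.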
- old_doc = {'_key': '1', 'foo': '2'}
- new_doc = {'_key': '1', 'bar': '3'}
+ doc = docs[0]
+ doc_key = doc['_key']
+ old_doc = {'_key': doc_key, 'foo': '2'}
+ new_doc = {'_key': doc_key, 'bar': '3'}
col.insert(old_doc)
- result = col.import_bulk([new_doc], on_duplicate='error')
+ result = col.import_bulk([new_doc], on_duplicate='error',
+ halt_on_error=False)
assert len(col) == 1
assert result['created'] == 0
assert result['errors'] == 1
assert result['empty'] == 0
assert result['updated'] == 0
assert result['ignored'] == 0
- assert col['1']['foo'] == '2'
- assert 'bar' not in col['1']
+ assert col[doc['_key']]['foo'] == '2'
+ assert 'bar' not in col[doc['_key']]
- result = col.import_bulk([new_doc], on_duplicate='ignore')
+ result = col.import_bulk([new_doc], on_duplicate='ignore',
+ halt_on_error=False)
assert len(col) == 1
assert result['created'] == 0
assert result['errors'] == 0
assert result['empty'] == 0
assert result['updated'] == 0
assert result['ignored'] == 1
- assert col['1']['foo'] == '2'
- assert 'bar' not in col['1']
+ assert col[doc['_key']]['foo'] == '2'
+ assert 'bar' not in col[doc['_key']]
- result = col.import_bulk([new_doc], on_duplicate='update')
+ result = col.import_bulk([new_doc], on_duplicate='update',
+ halt_on_error=False)
assert len(col) == 1
assert result['created'] == 0
assert result['errors'] == 0
assert result['empty'] == 0
assert result['updated'] == 1
assert result['ignored'] == 0
- assert col['1']['foo'] == '2'
- assert col['1']['bar'] == '3'
+ assert col[doc['_key']]['foo'] == '2'
+ assert col[doc['_key']]['bar'] == '3'
col.truncate()
col.insert(old_doc)
- result = col.import_bulk([new_doc], on_duplicate='replace')
+ result = col.import_bulk([new_doc], on_duplicate='replace',
+ halt_on_error=False)
assert len(col) == 1
assert result['created'] == 0
assert result['errors'] == 0
assert result['empty'] == 0
assert result['updated'] == 1
assert result['ignored'] == 0
- assert 'foo' not in col['1']
- assert col['1']['bar'] == '3'
+ assert 'foo' not in col[doc['_key']]
+ assert col[doc['_key']]['bar'] == '3'
+
+
+def test_document_edge(lecol, docs, edocs):
+ ecol = lecol # legacy edge collection
+
+ # Test insert edge without "_from" and "_to" fields
+ with assert_raises(DocumentInsertError):
+ ecol.insert(docs[0])
+
+ # Test insert many edges without "_from" and "_to" fields
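+ # (Bulk methods return a per-document error object instead of raising.)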
+ for result in ecol.insert_many(docs):
+ assert isinstance(result, DocumentInsertError)
+
+ # Test update edge without "_from" and "_to" fields
+ with assert_raises(DocumentUpdateError):
+ ecol.update(docs[0])
+
+ # Test update many edges without "_from" and "_to" fields
+ for result in ecol.update_many(docs):
+ assert isinstance(result, DocumentUpdateError)
+
+ # Test replace edge without "_from" and "_to" fields
+ with assert_raises(DocumentReplaceError):
+ ecol.replace(docs[0])
+
+ # Test replace many edges without "_from" and "_to" fields
+ for result in ecol.replace_many(docs):
+ assert isinstance(result, DocumentReplaceError)
+
+ # Test edge document happy path
+ edoc = edocs[0]
+
+ # Test insert edge
+ result = ecol.insert(edoc, return_new=True, sync=True)
+ assert len(ecol) == 1
+ assert result['_id'] == '{}/{}'.format(ecol.name, edoc['_key'])
+ assert result['_key'] == edoc['_key']
+ assert result['new']['_key'] == edoc['_key'] == ecol[edoc]['_key']
+ assert result['new']['_from'] == edoc['_from'] == ecol[edoc]['_from']
+ assert result['new']['_to'] == edoc['_to'] == ecol[edoc]['_to']
+
+ # Test update edge
+ new_edoc = edoc.copy()
+ new_edoc.update({'_from': 'foo', '_to': 'bar'})
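+ # (The server accepts arbitrary strings for "_from" and "_to" on update.)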
+ result = ecol.update(new_edoc, return_old=True, return_new=True)
+ assert result['_id'] == '{}/{}'.format(ecol.name, edoc['_key'])
+ assert result['_key'] == edoc['_key']
+ assert result['new']['_key'] == new_edoc['_key']
+ assert result['new']['_from'] == new_edoc['_from']
+ assert result['new']['_to'] == new_edoc['_to']
+ assert result['old']['_key'] == edoc['_key']
+ assert result['old']['_from'] == edoc['_from']
+ assert result['old']['_to'] == edoc['_to']
+ assert ecol[edoc]['_key'] == edoc['_key']
+ assert ecol[edoc]['_from'] == new_edoc['_from']
+ assert ecol[edoc]['_to'] == new_edoc['_to']
+ edoc = new_edoc
+
+ # Test replace edge
+ new_edoc = edoc.copy()
+ new_edoc.update({'_from': 'baz', '_to': 'qux'})
+ result = ecol.replace(new_edoc, return_old=True, return_new=True)
+ assert result['_id'] == '{}/{}'.format(ecol.name, edoc['_key'])
+ assert result['_key'] == edoc['_key']
+ assert result['new']['_key'] == new_edoc['_key']
+ assert result['new']['_from'] == new_edoc['_from']
+ assert result['new']['_to'] == new_edoc['_to']
+ assert result['old']['_key'] == edoc['_key']
+ assert result['old']['_from'] == edoc['_from']
+ assert result['old']['_to'] == edoc['_to']
+ assert ecol[edoc]['_key'] == edoc['_key']
+ assert ecol[edoc]['_from'] == new_edoc['_from']
+ assert ecol[edoc]['_to'] == new_edoc['_to']
+ edoc = new_edoc
+
+ # Test delete edge
+ result = ecol.delete(edoc, return_old=True)
+ assert result['_id'] == '{}/{}'.format(ecol.name, edoc['_key'])
+ assert result['_key'] == edoc['_key']
+ assert result['old']['_key'] == edoc['_key']
+ assert result['old']['_from'] == edoc['_from']
+ assert result['old']['_to'] == edoc['_to']
+ assert edoc not in ecol
+
+ # Test insert many edges
+ results = ecol.insert_many(edocs, return_new=True, sync=True)
+ for result, edoc in zip(results, edocs):
+ assert result['_id'] == '{}/{}'.format(ecol.name, edoc['_key'])
+ assert result['_key'] == edoc['_key']
+ assert result['new']['_key'] == edoc['_key']
+ assert result['new']['_from'] == edoc['_from']
+ assert result['new']['_to'] == edoc['_to']
+ assert ecol[edoc]['_key'] == edoc['_key']
+ assert ecol[edoc]['_from'] == edoc['_from']
+ assert ecol[edoc]['_to'] == edoc['_to']
+ assert len(ecol) == 4
+
+ # Test update many edges
+ for edoc in edocs:
+ edoc['foo'] = 1
+ results = ecol.update_many(edocs, return_new=True, sync=True)
+ for result, edoc in zip(results, edocs):
+ assert result['_id'] == '{}/{}'.format(ecol.name, edoc['_key'])
+ assert result['_key'] == edoc['_key']
+ assert result['new']['_key'] == edoc['_key']
+ assert result['new']['_from'] == edoc['_from']
+ assert result['new']['_to'] == edoc['_to']
+ assert result['new']['foo'] == 1
+ assert ecol[edoc]['_key'] == edoc['_key']
+ assert ecol[edoc]['_from'] == edoc['_from']
+ assert ecol[edoc]['_to'] == edoc['_to']
+ assert ecol[edoc]['foo'] == 1
+ assert len(ecol) == 4
+
+ # Test replace many edges
+ for edoc in edocs:
+ edoc['bar'] = edoc.pop('foo')
+ results = ecol.replace_many(edocs, return_new=True, sync=True)
+ for result, edoc in zip(results, edocs):
+ assert result['_id'] == '{}/{}'.format(ecol.name, edoc['_key'])
+ assert result['_key'] == edoc['_key']
+ assert result['new']['_key'] == edoc['_key']
+ assert result['new']['_from'] == edoc['_from']
+ assert result['new']['_to'] == edoc['_to']
+ assert result['new']['bar'] == 1
+ assert 'foo' not in result['new']
+ assert ecol[edoc]['_key'] == edoc['_key']
+ assert ecol[edoc]['_from'] == edoc['_from']
+ assert ecol[edoc]['_to'] == edoc['_to']
+ assert ecol[edoc]['bar'] == 1
+ assert 'foo' not in ecol[edoc]
+ assert len(ecol) == 4
+
+ results = ecol.delete_many(edocs, return_old=True)
+ for result, edoc in zip(results, edocs):
+ assert result['_id'] == '{}/{}'.format(ecol.name, edoc['_key'])
+ assert result['_key'] == edoc['_key']
+ assert result['old']['_key'] == edoc['_key']
+ assert result['old']['_from'] == edoc['_from']
+ assert result['old']['_to'] == edoc['_to']
+ assert edoc not in ecol
+ assert edoc['_key'] not in ecol
+ assert len(ecol) == 0
+
+ # Test import bulk to_prefix and from_prefix
+ for doc in edocs:
+ doc['_from'] = 'foo'
+ doc['_to'] = 'bar'
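+ # from_prefix/to_prefix are joined to the raw "_from"/"_to" values with a
+ # "/", so "foo" becomes "from/foo" and "bar" becomes "to/bar".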
+ result = ecol.import_bulk(edocs, from_prefix='from', to_prefix='to')
+ assert result['created'] == 4
+ assert result['errors'] == 0
+ assert result['empty'] == 0
+ assert result['updated'] == 0
+ assert result['ignored'] == 0
+ for edoc in ecol:
+ assert edoc['_from'] == 'from/foo'
+ assert edoc['_to'] == 'to/bar'
+
+
+def test_document_management_via_db(db, col):
+ doc1_id = col.name + '/foo'
+ doc2_id = col.name + '/bar'
+ doc1 = {'_key': 'foo'}
+ doc2 = {'_id': doc2_id}
+
+ # Test document insert with empty body
+ result = db.insert_document(col.name, {})
+ assert len(col) == 1
+ assert db.has_document(result['_id']) is True
+ assert db.has_document(result['_id'], rev=result['_rev']) is True
+
+ # Test document insert with key
+ assert db.has_document(doc1_id) is False
+ result = db.insert_document(col.name, doc1)
+ assert result['_key'] == 'foo'
+ assert result['_id'] == doc1_id
+ assert len(col) == 2
+ assert db.has_document(doc1_id) is True
+ assert db.has_document(doc1_id, rev=result['_rev']) is True
+
+ # Test document insert with ID
+ assert db.has_document(doc2_id) is False
+ result = db.insert_document(col.name, doc2)
+ assert result['_key'] == 'bar'
+ assert result['_id'] == doc2_id
+ assert len(col) == 3
+ assert db.has_document(doc2_id) is True
+ assert db.has_document(doc2_id, rev=result['_rev']) is True
+
+ # Test document get with bad input
+ with assert_raises(DocumentParseError) as err:
+ db.document(doc1)
+ assert str(err.value) == 'field "_id" required'
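+ # (The database-level document methods resolve the target collection from
+ # the "_id" field, which is why it is required.)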
+
+ # Test document get
+ for doc_id in [doc1_id, doc2_id]:
+ result = db.document(doc_id)
+ assert '_rev' in result
+ assert '_key' in result
+ assert result['_id'] == doc_id
+
+ # Test document update with bad input
+ with assert_raises(DocumentParseError) as err:
+ db.update_document(doc1)
+ assert str(err.value) == 'field "_id" required'
+
+ # Test document update
+ result = db.update_document({'_id': doc1_id, 'val': 100})
+ assert result['_id'] == doc1_id
+ assert col[doc1_id]['val'] == 100
+ assert len(col) == 3
+
+ # Test document replace with bad input
+ with assert_raises(DocumentParseError) as err:
+ db.replace_document(doc1)
+ assert str(err.value) == 'field "_id" required'
+
+ # Test document replace
+ result = db.replace_document({'_id': doc1_id, 'num': 300})
+ assert result['_id'] == doc1_id
+ assert 'val' not in col[doc1_id]
+ assert col[doc1_id]['num'] == 300
+ assert len(col) == 3
+
+ # Test document delete with bad input
+ with assert_raises(DocumentParseError) as err:
+ db.delete_document(doc1)
+ assert str(err.value) == 'field "_id" required'
+
+ # Test document delete
+ result = db.delete_document({'_id': doc1_id})
+ assert result['_id'] == doc1_id
+ assert doc1_id not in col
+ assert len(col) == 2
diff --git a/tests/test_edge.py b/tests/test_edge.py
deleted file mode 100644
index ace0b56a..00000000
--- a/tests/test_edge.py
+++ /dev/null
@@ -1,523 +0,0 @@
-from __future__ import absolute_import, unicode_literals
-
-import pytest
-from six import string_types
-
-from arango import ArangoClient
-from arango.exceptions import *
-
-from .utils import (
- generate_db_name,
- generate_col_name,
-)
-
-arango_client = ArangoClient()
-db_name = generate_db_name()
-db = arango_client.create_database(db_name)
-ecol_name = generate_col_name()
-ecol = db.create_collection(ecol_name, edge=True)
-ecol.add_geo_index(['coordinates'])
-
-# Set up test collection and edges
-col_name = generate_col_name()
-db.create_collection(col_name).import_bulk([
- {'_key': '1'}, {'_key': '2'}, {'_key': '3'}, {'_key': '4'}, {'_key': '5'}
-])
-edge1 = {'_key': '1', '_from': col_name + '/1', '_to': col_name + '/2'}
-edge2 = {'_key': '2', '_from': col_name + '/2', '_to': col_name + '/3'}
-edge3 = {'_key': '3', '_from': col_name + '/3', '_to': col_name + '/4'}
-edge4 = {'_key': '4', '_from': col_name + '/1', '_to': col_name + '/1'}
-edge5 = {'_key': '5', '_from': col_name + '/5', '_to': col_name + '/3'}
-test_edges = [edge1, edge2, edge3, edge4, edge5]
-test_edge_keys = [e['_key'] for e in test_edges]
-
-
-def teardown_module(*_):
- arango_client.delete_database(db_name, ignore_missing=True)
-
-
-def setup_function(*_):
- ecol.truncate()
-
-
-def test_insert():
- # Test insert first valid edge with return_new and sync
- result = ecol.insert(edge1, return_new=True, sync=True)
- assert len(ecol) == 1
- assert result['_id'] == '{}/{}'.format(ecol_name, edge1['_key'])
- assert result['_key'] == edge1['_key']
- assert isinstance(result['_rev'], string_types)
- assert result['new']['_key'] == edge1['_key']
- assert result['new']['_from'] == edge1['_from']
- assert result['new']['_to'] == edge1['_to']
- assert result['sync'] is True
- assert ecol['1']['_key'] == edge1['_key']
- assert ecol['1']['_from'] == edge1['_from']
- assert ecol['1']['_to'] == edge1['_to']
-
- # Test insert invalid edges
- with pytest.raises(DocumentInsertError):
- ecol.insert({'foo': 'bar'})
- assert len(ecol) == 1 and '2' not in ecol
-
- with pytest.raises(DocumentInsertError):
- ecol.insert({'_key': '2'})
- assert len(ecol) == 1 and '2' not in ecol
-
- with pytest.raises(DocumentInsertError):
- ecol.insert({'_key': '2', '_to': col_name + '/3'})
- assert len(ecol) == 1 and '2' not in ecol
-
- with pytest.raises(DocumentInsertError):
- ecol.insert({'_key': '2', '_from': col_name + '/3'})
- assert len(ecol) == 1 and '2' not in ecol
-
- # Test insert second valid edge without return_new and sync
- result = ecol.insert(edge2, return_new=False, sync=False)
- assert len(ecol) == 2
- assert result['_id'] == '{}/{}'.format(ecol_name, edge2['_key'])
- assert result['_key'] == edge2['_key']
- assert isinstance(result['_rev'], string_types)
- assert 'new' not in result
- assert result['sync'] is False
- assert ecol['2']['_key'] == edge2['_key']
- assert ecol['2']['_from'] == edge2['_from']
- assert ecol['2']['_to'] == edge2['_to']
-
-
-def test_insert_many():
- # Test insert_many valid edges with return_new and sync
- results = ecol.insert_many(test_edges, return_new=True, sync=True)
- for result, edge in zip(results, test_edges):
- key = edge['_key']
- assert result['_id'] == '{}/{}'.format(ecol_name, key)
- assert result['_key'] == key
- assert isinstance(result['_rev'], string_types)
- assert result['new']['_key'] == key
- assert result['new']['_from'] == edge['_from']
- assert result['new']['_to'] == edge['_to']
- assert result['sync'] is True
- assert ecol[key]['_key'] == key
- assert ecol[key]['_from'] == edge['_from']
- assert ecol[key]['_to'] == edge['_to']
- assert len(ecol) == 5
- ecol.truncate()
-
- # Test insert_many valid edges with return_new and sync
- invalid_edges = [
- {'foo': 'bar'},
- {'_key': '1'},
- {'_key': '2', '_to': col_name + '/3'},
- {'_key': '3', '_from': col_name + '/3'},
- ]
- results = ecol.insert_many(invalid_edges, return_new=False, sync=False)
- for result, edge in zip(results, invalid_edges):
- isinstance(result, DocumentInsertError)
- if '_key' in edge:
- assert edge['_key'] not in ecol
- assert len(ecol) == 0
- ecol.truncate()
-
-
-def test_update():
- # Set up test edges
- edge = edge1.copy()
- ecol.insert(edge)
-
- # Test update edge _from and _to to invalid edges
- edge['_from'] = None
- edge['_to'] = None
- result = ecol.update(edge, return_old=True, return_new=True)
- assert result['_id'] == '{}/{}'.format(ecol.name, edge['_key'])
- assert result['_key'] == edge['_key']
- assert isinstance(result['_rev'], string_types)
- assert result['new']['_key'] == edge['_key']
- assert result['new']['_from'] is None
- assert result['new']['_to'] is None
- assert result['old']['_key'] == edge1['_key']
- assert result['old']['_from'] == edge1['_from']
- assert result['old']['_to'] == edge1['_to']
- assert result['sync'] is False
- assert ecol['1']['_key'] == edge1['_key']
- assert ecol['1']['_from'] is None
- assert ecol['1']['_to'] is None
-
- # TODO should this really be allowed?
- # Test update edge _from and _to to valid edges
- edge['_from'] = edge2['_from']
- edge['_to'] = edge2['_to']
- result = ecol.update(edge, return_old=True, return_new=True)
- assert result['_id'] == '{}/{}'.format(ecol.name, edge['_key'])
- assert result['_key'] == edge['_key']
- assert isinstance(result['_rev'], string_types)
- assert result['new']['_key'] == edge1['_key']
- assert result['new']['_from'] == edge2['_from']
- assert result['new']['_to'] == edge2['_to']
- assert result['old']['_key'] == edge1['_key']
- assert result['old']['_from'] is None
- assert result['old']['_to'] is None
- assert result['sync'] is False
- assert ecol['1']['_key'] == edge1['_key']
- assert ecol['1']['_from'] == edge2['_from']
- assert ecol['1']['_to'] == edge2['_to']
-
-
-def test_update_many():
- # Set up test edges
- ecol.import_bulk(test_edges)
-
- # Test update mix of valid and invalid edges
- new_edges = [
- {'_key': '1', '_to': 'foo', '_from': 'bar'},
- {'_key': '2', '_to': 'foo', 'val': 'baz'},
- {'_key': '3', '_from': 'bar'},
- {'_key': '6', '_from': 'bar'},
- {'foo': 'bar', 'bar': 'baz'}
- ]
- results = ecol.update_many(new_edges, return_new=True, return_old=True)
-
- assert results[0]['old']['_to'] == edge1['_to']
- assert results[0]['old']['_from'] == edge1['_from']
- assert results[0]['new']['_to'] == 'foo'
- assert results[0]['new']['_from'] == 'bar'
-
- assert results[1]['old']['_to'] == edge2['_to']
- assert results[1]['old']['_from'] == edge2['_from']
- assert results[1]['new']['_to'] == 'foo'
- assert results[1]['new']['_from'] == edge2['_from']
-
- assert results[2]['old']['_to'] == edge3['_to']
- assert results[2]['old']['_from'] == edge3['_from']
- assert results[2]['new']['_to'] == edge3['_to']
- assert results[2]['new']['_from'] == 'bar'
-
- assert isinstance(results[3], DocumentUpdateError)
- assert isinstance(results[4], DocumentUpdateError)
-
-
-def test_update_match():
- # Set up test edges
- ecol.insert_many(test_edges)
-
- # Test update single matching document
- assert ecol.update_match(
- {'_key': '1'},
- {'_to': 'foo'}
- ) == 1
- assert ecol['1']['_to'] == 'foo'
- assert ecol['1']['_from'] == edge1['_from']
-
- # Test update multiple matching documents
- assert ecol.update_match(
- {'_from': col_name + '/1'},
- {'foo': 'bar'}
- ) == 2
- assert ecol['1']['foo'] == 'bar'
- assert ecol['4']['foo'] == 'bar'
-
- # Test update multiple matching documents with arguments
- assert ecol.update_match(
- {'_from': col_name + '/1'},
- {'foo': None, 'bar': 'baz'},
- limit=1,
- sync=True,
- keep_none=False
- ) == 1
- assert ecol['1']['foo'] == 'bar'
- assert 'foo' not in ecol['4']
- assert ecol['4']['bar'] == 'baz'
-
- # Test unaffected document
- assert ecol['2']['_to'] == edge2['_to']
- assert 'foo' not in ecol['2']
-
- # Test update matching documents in missing collection
- bad_ecol_name = generate_col_name()
- with pytest.raises(DocumentUpdateError):
- bad_ecol = db.collection(bad_ecol_name)
- bad_ecol.update_match({'_key': '1'}, {'foo': 100})
-
-
-def test_replace():
- # Set up test edges
- edge = edge1.copy()
- ecol.insert(edge)
-
- # Test replace edge _from and _to to invalid edges
- edge['_from'] = None
- edge['_to'] = None
- with pytest.raises(DocumentReplaceError):
- ecol.replace(edge, return_old=True, return_new=True)
- assert ecol['1']['_key'] == edge1['_key']
- assert ecol['1']['_from'] == edge1['_from']
- assert ecol['1']['_to'] == edge1['_to']
-
- # Test replace edge _from and _to to missing edges
- edge['_from'] = 'missing/edge'
- edge['_to'] = 'missing/edge'
- ecol.replace(edge, return_old=True, return_new=True)
- assert ecol['1']['_key'] == edge1['_key']
- assert ecol['1']['_from'] == 'missing/edge'
- assert ecol['1']['_to'] == 'missing/edge'
-
- # Test replace edge _from and _to to missing edges
- edge['_from'] = edge2['_from']
- edge['_to'] = edge2['_to']
- ecol.replace(edge, return_old=True, return_new=True)
- assert ecol['1']['_key'] == edge1['_key']
- assert ecol['1']['_from'] == edge2['_from']
- assert ecol['1']['_to'] == edge2['_to']
-
-
-def test_replace_many():
- # Set up test edges
- ecol.insert_many(test_edges)
-
- # Test replace mix of valid and invalid edges
- new_edges = [
- {'_key': '1', '_to': 'foo', '_from': 'bar'},
- {'_key': '2', '_to': 'foo', 'val': 'baz'},
- {'_key': '3', '_from': 'bar'},
- {'_key': '5', '_from': 'bar'},
- {'foo': 'bar', 'bar': 'baz'}
- ]
- results = ecol.replace_many(new_edges, return_new=True, return_old=True)
-
- assert results[0]['old']['_to'] == edge1['_to']
- assert results[0]['old']['_from'] == edge1['_from']
- assert results[0]['new']['_to'] == 'foo'
- assert results[0]['new']['_from'] == 'bar'
-
- assert isinstance(results[1], DocumentReplaceError)
- assert isinstance(results[2], DocumentReplaceError)
- assert isinstance(results[3], DocumentReplaceError)
- assert isinstance(results[4], DocumentReplaceError)
-
-
-def test_replace_match():
- # Set up test edges
- ecol.insert_many(test_edges)
-
- # Test replace single matching document with invalid body
- with pytest.raises(DocumentReplaceError):
- ecol.replace_match({'_key': '3'}, {'_to': edge2['_to']})
-
- # Test replace single matching document with valid body
- assert ecol.replace_match(
- {'_key': '3'},
- {'_to': edge2['_to'], '_from': edge3['_from']}
- ) == 1
- assert ecol['3']['_to'] == edge2['_to']
- assert ecol['3']['_from'] == edge3['_from']
-
- # Test replace multiple matching documents
- assert ecol.replace_match(
- {'_from': col_name + '/1'},
- {'_to': edge1['_to'], '_from': edge1['_from'], 'foo': 'bar'}
- ) == 2
- assert ecol['1']['foo'] == 'bar'
- assert ecol['1']['_to'] == edge1['_to']
- assert ecol['1']['_from'] == edge1['_from']
-
- assert ecol['4']['foo'] == 'bar'
-
- # Test replace multiple matching documents with arguments
- assert ecol.replace_match(
- {'_from': col_name + '/1'},
- {'_to': edge3['_to'], '_from': edge3['_from'], 'foo': 'baz'},
- limit=1,
- sync=True,
- ) == 1
- assert ecol['1']['foo'] == 'baz'
- assert ecol['4']['foo'] == 'bar'
-
- # Test unaffected document
- assert ecol['2']['_to'] == edge2['_to']
- assert 'foo' not in ecol['2']
-
- # Test replace matching documents in missing collection
- bad_ecol_name = generate_col_name()
- with pytest.raises(DocumentReplaceError):
- bad_ecol = db.collection(bad_ecol_name)
- bad_ecol.replace_match({'_key': '1'}, {'foo': 100})
-
-
-def test_delete():
- # Set up test edges
- ecol.import_bulk(test_edges)
-
- # Test delete (edge) with default options
- result = ecol.delete(edge1)
- assert result['_id'] == '{}/{}'.format(ecol.name, edge1['_key'])
- assert result['_key'] == edge1['_key']
- assert isinstance(result['_rev'], string_types)
- assert result['sync'] is False
- assert 'old' not in result
- assert edge1['_key'] not in ecol
- assert len(ecol) == 4
-
- # Test delete (edge key) with default options
- result = ecol.delete(edge2['_key'])
- assert result['_id'] == '{}/{}'.format(ecol.name, edge2['_key'])
- assert result['_key'] == edge2['_key']
- assert isinstance(result['_rev'], string_types)
- assert result['sync'] is False
- assert 'old' not in result
- assert edge2['_key'] not in ecol
- assert len(ecol) == 3
-
- # Test delete (edge) with return_old
- result = ecol.delete(edge3, return_old=True)
- assert result['_id'] == '{}/{}'.format(ecol.name, edge3['_key'])
- assert result['_key'] == edge3['_key']
- assert isinstance(result['_rev'], string_types)
- assert result['sync'] is False
- assert result['old']['_key'] == edge3['_key']
- assert result['old']['_to'] == edge3['_to']
- assert result['old']['_from'] == edge3['_from']
- assert edge3['_key'] not in ecol
- assert len(ecol) == 2
-
- # Test delete (edge key) with sync
- result = ecol.delete(edge4, sync=True)
- assert result['_id'] == '{}/{}'.format(ecol.name, edge4['_key'])
- assert result['_key'] == edge4['_key']
- assert isinstance(result['_rev'], string_types)
- assert result['sync'] is True
- assert edge4['_key'] not in ecol
- assert len(ecol) == 1
-
- # Test delete (edge) with check_rev
- rev = ecol[edge5['_key']]['_rev'] + '000'
- bad_edge = edge5.copy()
- bad_edge.update({'_rev': rev})
- with pytest.raises(ArangoError):
- ecol.delete(bad_edge, check_rev=True)
- assert bad_edge['_key'] in ecol
- assert len(ecol) == 1
-
- # Test delete (edge) with check_rev
- assert ecol.delete(edge4, ignore_missing=True) is False
- with pytest.raises(DocumentDeleteError):
- ecol.delete(edge4, ignore_missing=False)
- assert len(ecol) == 1
-
- # Test delete with missing edge collection
- bad_col = generate_col_name()
- with pytest.raises(DocumentDeleteError):
- db.collection(bad_col).delete(edge5)
-
- bad_col = generate_col_name()
- with pytest.raises(DocumentDeleteError):
- db.collection(bad_col).delete(edge5['_key'])
-
-
-def test_delete_many():
- # Set up test edges
- current_revs = {}
- edges = [edge.copy() for edge in test_edges]
-
- # Test delete_many (edges) with default options
- ecol.import_bulk(edges)
- results = ecol.delete_many(edges)
- for result, key in zip(results, test_edge_keys):
- assert result['_id'] == '{}/{}'.format(ecol.name, key)
- assert result['_key'] == key
- assert isinstance(result['_rev'], string_types)
- assert result['sync'] is False
- assert 'old' not in result
- assert key not in ecol
- current_revs[key] = result['_rev']
- assert len(ecol) == 0
-
- # Test delete_many (edge keys) with default options
- ecol.import_bulk(edges)
- results = ecol.delete_many(edges)
- for result, key in zip(results, test_edge_keys):
- assert result['_id'] == '{}/{}'.format(ecol.name, key)
- assert result['_key'] == key
- assert isinstance(result['_rev'], string_types)
- assert result['sync'] is False
- assert 'old' not in result
- assert key not in ecol
- current_revs[key] = result['_rev']
- assert len(ecol) == 0
-
- # Test delete_many (edges) with return_old
- ecol.import_bulk(edges)
- results = ecol.delete_many(edges, return_old=True)
- for result, edge in zip(results, edges):
- key = edge['_key']
- assert result['_id'] == '{}/{}'.format(ecol.name, key)
- assert result['_key'] == key
- assert isinstance(result['_rev'], string_types)
- assert result['sync'] is False
- assert result['old']['_key'] == key
- assert result['old']['_to'] == edge['_to']
- assert result['old']['_from'] == edge['_from']
- assert key not in ecol
- current_revs[key] = result['_rev']
- assert len(ecol) == 0
-
- # Test delete_many (edge keys) with sync
- ecol.import_bulk(edges)
- results = ecol.delete_many(edges, sync=True)
- for result, edge in zip(results, edges):
- key = edge['_key']
- assert result['_id'] == '{}/{}'.format(ecol.name, key)
- assert result['_key'] == key
- assert isinstance(result['_rev'], string_types)
- assert result['sync'] is True
- assert 'old' not in result
- assert key not in ecol
- current_revs[key] = result['_rev']
- assert len(ecol) == 0
-
- # Test delete_many (edges) with check_rev
- ecol.import_bulk(edges)
- for edge in edges:
- edge['_rev'] = current_revs[edge['_key']] + '000'
- results = ecol.delete_many(edges, check_rev=True)
- for result, edge in zip(results, edges):
- assert isinstance(result, DocumentRevisionError)
- assert len(ecol) == 5
-
- # Test delete_many (edges) with missing edges
- ecol.truncate()
- results = ecol.delete_many([{'_key': '6'}, {'_key': '7'}])
- for result, edge in zip(results, edges):
- assert isinstance(result, DocumentDeleteError)
- assert len(ecol) == 0
-
- # Test delete_many with missing edge collection
- bad_ecol = generate_col_name()
- with pytest.raises(DocumentDeleteError):
- db.collection(bad_ecol).delete_many(edges)
-
- bad_ecol = generate_col_name()
- with pytest.raises(DocumentDeleteError):
- db.collection(bad_ecol).delete_many(test_edge_keys)
-
-
-def test_delete_match():
- # Test preconditions
- assert ecol.delete_match({'_from': col_name + '/1'}) == 0
-
- # Set up test documents
- ecol.import_bulk(test_edges)
-
- # Test delete matching document with default options
- assert '3' in ecol
- assert ecol.delete_match({'_key': '3'}) == 1
- assert '3' not in ecol
-
- # Test delete matching documents with sync
- assert '1' in ecol
- assert '4' in ecol
- assert ecol.delete_match({'_from': col_name + '/1'}, sync=True) == 2
- assert '1' not in ecol
-
- # Test delete matching documents with limit of 2
- assert [doc['_to'] for doc in ecol].count(col_name + '/3') == 2
- assert ecol.delete_match({'_to': col_name + '/3'}, limit=1) == 1
- assert [doc['_to'] for doc in ecol].count(col_name + '/3') == 1
diff --git a/tests/test_exception.py b/tests/test_exception.py
new file mode 100644
index 00000000..230d8c6c
--- /dev/null
+++ b/tests/test_exception.py
@@ -0,0 +1,82 @@
+from __future__ import absolute_import, unicode_literals
+
+import json
+
+import pytest
+from requests.structures import CaseInsensitiveDict
+
+from arango.exceptions import (
+ ArangoServerError,
+ DocumentInsertError,
+ DocumentParseError,
+ ArangoClientError
+)
+from arango.request import Request
+from arango.response import Response
+
+
+def test_server_error(col, docs, url):
+ document = docs[0]
+ with pytest.raises(DocumentInsertError) as err:
+ col.insert(document, return_new=False)
+ col.insert(document, return_new=False) # duplicate key error
+ exc = err.value
+
+ assert isinstance(exc, ArangoServerError)
+ assert exc.source == 'server'
+ assert exc.message == str(exc)
+ assert exc.message.startswith('[HTTP 409][ERR 1210] unique constraint')
+ assert exc.url.startswith(url)
+ assert exc.error_code == 1210
+ assert exc.http_method == 'post'
+ assert exc.http_code == 409
+ assert exc.http_headers['Server'] == 'ArangoDB'
+ assert isinstance(exc.http_headers, CaseInsensitiveDict)
+
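+ # Server errors also expose the underlying response and request objects.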
+ resp = exc.response
+ expected_body = {
+ 'code': exc.http_code,
+ 'error': True,
+ 'errorNum': exc.error_code,
+ 'errorMessage': exc.error_message
+ }
+ assert isinstance(resp, Response)
+ assert resp.is_success is False
+ assert resp.error_code == exc.error_code
+ assert resp.body == expected_body
+ assert resp.error_code == 1210
+ assert resp.method == 'post'
+ assert resp.status_code == 409
+ assert resp.status_text == 'Conflict'
+ assert json.loads(resp.raw_body) == expected_body
+ assert resp.headers == exc.http_headers
+ assert resp.url.startswith(url)
+
+ req = exc.request
+ assert isinstance(req, Request)
+ assert req.headers['content-type'] == 'application/json'
+ assert req.method == 'post'
+ assert req.read is None
+ assert req.write == col.name
+ assert req.command is None
+ assert req.params == {'returnNew': 0, 'silent': 0}
+ assert req.data == json.dumps(document)
+ assert req.endpoint.startswith('/_api/document/' + col.name)
+
+
+def test_client_error(col):
+ with pytest.raises(DocumentParseError) as err:
+ col.get({'_id': 'invalid'}) # malformed document
+ exc = err.value
+
+ assert isinstance(exc, ArangoClientError)
+ assert exc.source == 'client'
+ assert exc.error_code is None
+ assert exc.error_message is None
+ assert exc.message == str(exc)
+ assert exc.message.startswith('bad collection name')
+ assert exc.url is None
+ assert exc.http_method is None
+ assert exc.http_code is None
+ assert exc.http_headers is None
+ assert exc.response is None
diff --git a/tests/test_foxx.py b/tests/test_foxx.py
new file mode 100644
index 00000000..6c6a5122
--- /dev/null
+++ b/tests/test_foxx.py
@@ -0,0 +1,373 @@
+from __future__ import absolute_import, unicode_literals
+
+import json
+
+import os
+
+from six import string_types
+
+from arango.exceptions import (
+ FoxxServiceGetError,
+ FoxxServiceListError,
+ FoxxServiceCreateError,
+ FoxxServiceUpdateError,
+ FoxxServiceReplaceError,
+ FoxxServiceDeleteError,
+ FoxxConfigGetError,
+ FoxxConfigUpdateError,
+ FoxxDependencyGetError,
+ FoxxDependencyUpdateError,
+ FoxxDependencyReplaceError,
+ FoxxConfigReplaceError,
+ FoxxDevModeEnableError,
+ FoxxDevModeDisableError,
+ FoxxReadmeGetError,
+ FoxxSwaggerGetError,
+ FoxxDownloadError,
+ FoxxCommitError,
+ FoxxScriptListError,
+ FoxxScriptRunError,
+ FoxxTestRunError
+)
+from arango.foxx import Foxx
+from tests.helpers import (
+ assert_raises,
+ extract,
+ generate_service_mount
+)
+
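+# On CI the test service archive is staged at /tmp/service.zip; locally it
+# resolves to tests/static/service.zip whether pytest runs from the project
+# root or from inside the tests directory.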
+if os.getenv('TRAVIS', False):
+ # noinspection PyUnresolvedReferences
+ service_file = os.path.join(os.sep, 'tmp', 'service.zip')
+else:
+ cwd = os.getcwd()
+ path = [cwd]
+ if not cwd.endswith('tests'):
+ path.append('tests')
+ path.extend(['static', 'service.zip'])
+ service_file = os.path.join(*path)
+
+
+def test_foxx_attributes(db):
+ assert isinstance(db.foxx, Foxx)
+ assert repr(db.foxx) == '<Foxx in {}>'.format(db.name)
+
+
+def test_foxx_service_management(db, bad_db):
+ service_mount = generate_service_mount()
+ missing_mount = generate_service_mount()
+
+ # Test list services
+ for service in db.foxx.services():
+ assert 'development' in service
+ assert 'legacy' in service
+ assert 'mount' in service
+ assert 'name' in service
+ assert 'provides' in service
+ assert 'version' in service
+
+ # Test list services with bad database
+ with assert_raises(FoxxServiceListError) as err:
+ bad_db.foxx.services()
+ assert err.value.error_code == 1228
+
+ # Test create service
+ service = db.foxx.create_service(
+ mount=service_mount,
+ source=service_file,
+ config={},
+ dependencies={},
+ development=True,
+ setup=True,
+ legacy=True
+ )
+ assert service['mount'] == service_mount
+ assert service['name'] == 'test'
+ assert service['development'] is True
+ assert service['legacy'] is True
+ assert service['manifest']['configuration'] == {}
+ assert service['manifest']['dependencies'] == {}
+
+ # Test create duplicate service
+ with assert_raises(FoxxServiceCreateError) as err:
+ db.foxx.create_service(service_mount, 'service.zip')
+ assert err.value.error_code == 3011
+
+ # Test get service
+ service = db.foxx.service(service_mount)
+ assert service['mount'] == service_mount
+ assert service['name'] == 'test'
+ assert service['development'] is True
+ assert service['manifest']['configuration'] == {}
+ assert service['manifest']['dependencies'] == {}
+ assert 'checksum' in service
+ assert 'options' in service
+ assert 'path' in service
+ assert 'version' in service
+
+ # Test get missing service
+ with assert_raises(FoxxServiceGetError) as err:
+ db.foxx.service(missing_mount)
+ assert err.value.error_code == 3009
+
+ # Test update service
+ service = db.foxx.update_service(
+ mount=service_mount,
+ source=service_file,
+ config={},
+ dependencies={},
+ teardown=True,
+ setup=True,
+ legacy=False
+ )
+ assert service['mount'] == service_mount
+ assert service['name'] == 'test'
+ assert service['legacy'] is False
+
+ # Test update missing service
+ with assert_raises(FoxxServiceUpdateError) as err:
+ db.foxx.update_service(missing_mount, 'service.zip')
+ assert err.value.error_code == 3009
+
+ # Test replace service
+ service = db.foxx.replace_service(
+ mount=service_mount,
+ source=service_file,
+ config={},
+ dependencies={},
+ teardown=True,
+ setup=True,
+ legacy=True,
+ force=False
+ )
+ assert service['mount'] == service_mount
+ assert service['name'] == 'test'
+ assert service['legacy'] is True
+
+ # Test replace missing service
+ with assert_raises(FoxxServiceReplaceError) as err:
+ db.foxx.replace_service(missing_mount, 'service.zip')
+ assert err.value.error_code == 3009
+
+ assert db.foxx.delete_service(service_mount, teardown=False) is True
+ assert service_mount not in extract('mount', db.foxx.services())
+
+ # Test delete missing service
+ with assert_raises(FoxxServiceDeleteError) as err:
+ db.foxx.delete_service(missing_mount, teardown=False)
+ assert err.value.error_code == 3009
+
+
+def test_foxx_config_management(db):
+ service_mount = generate_service_mount()
+ missing_mount = generate_service_mount()
+
+ # Prep the test service
+ db.foxx.create_service(
+ mount=service_mount,
+ source=service_file,
+ config={},
+ )
+
+ # Test get service config
+ assert db.foxx.config(service_mount) == {}
+
+ # Test get missing service config
+ with assert_raises(FoxxConfigGetError) as err:
+ db.foxx.config(missing_mount)
+ assert err.value.error_code == 3009
+
+ # Test update service config
+ assert db.foxx.update_config(service_mount, {}) == {'values': {}}
+
+ # Test update missing service config
+ with assert_raises(FoxxConfigUpdateError) as err:
+ db.foxx.update_config(missing_mount, {})
+ assert err.value.error_code == 3009
+
+ # Test replace service config
+ assert db.foxx.replace_config(service_mount, {}) == {'values': {}}
+
+ # Test replace missing service config
+ with assert_raises(FoxxConfigReplaceError) as err:
+ db.foxx.replace_config(missing_mount, {})
+ assert err.value.error_code == 3009
+
+
+def test_foxx_dependency_management(db):
+ service_mount = generate_service_mount()
+ missing_mount = generate_service_mount()
+
+ # Prep the test service
+ db.foxx.create_service(
+ mount=service_mount,
+ source=service_file,
+ dependencies={}
+ )
+
+ # Test get service dependencies
+ assert db.foxx.dependencies(service_mount) == {}
+
+ # Test get missing service dependencies
+ with assert_raises(FoxxDependencyGetError) as err:
+ db.foxx.dependencies(missing_mount)
+ assert err.value.error_code == 3009
+
+ # Test update service dependencies
+ assert db.foxx.update_dependencies(service_mount, {}) == {'values': {}}
+
+ # Test update missing service dependencies
+ with assert_raises(FoxxDependencyUpdateError) as err:
+ db.foxx.update_dependencies(missing_mount, {})
+ assert err.value.error_code == 3009
+
+ # Test replace service dependencies
+ assert db.foxx.replace_dependencies(service_mount, {}) == {'values': {}}
+
+ # Test replace missing service dependencies
+ with assert_raises(FoxxDependencyReplaceError) as err:
+ db.foxx.replace_dependencies(missing_mount, {})
+ assert err.value.error_code == 3009
+
+
+def test_foxx_development_toggle(db):
+ service_mount = generate_service_mount()
+ missing_mount = generate_service_mount()
+
+ # Prep the test service
+ db.foxx.create_service(
+ mount=service_mount,
+ source=service_file,
+ development=False,
+ )
+
+ # Test enable development mode
+ service = db.foxx.enable_development(service_mount)
+ assert service['mount'] == service_mount
+ assert service['name'] == 'test'
+ assert service['development'] is True
+
+ # Test enable development mode for missing service
+ with assert_raises(FoxxDevModeEnableError) as err:
+ db.foxx.enable_development(missing_mount)
+ assert err.value.error_code == 3009
+
+ # Test disable development mode
+ service = db.foxx.disable_development(service_mount)
+ assert service['mount'] == service_mount
+ assert service['name'] == 'test'
+ assert service['development'] is False
+
+ # Test disable development mode for missing service
+ with assert_raises(FoxxDevModeDisableError) as err:
+ db.foxx.disable_development(missing_mount)
+ assert err.value.error_code == 3009
+
+
+def test_foxx_misc_functions(db, bad_db):
+ service_mount = generate_service_mount()
+ missing_mount = generate_service_mount()
+
+ # Prep the test service
+ db.foxx.create_service(
+ mount=service_mount,
+ source=service_file,
+ )
+
+ # Test get service readme
+ assert 'Apache 2' in db.foxx.readme(service_mount)
+
+ # Test get missing service readme
+ with assert_raises(FoxxReadmeGetError) as err:
+ db.foxx.readme(missing_mount)
+ assert err.value.error_code == 3009
+
+ # Test get service swagger
+ swagger = db.foxx.swagger(service_mount)
+ assert 'swagger' in swagger
+ assert 'paths' in swagger
+ assert 'info' in swagger
+ assert 'base_path' in swagger
+
+ # Test get missing service swagger
+ with assert_raises(FoxxSwaggerGetError) as err:
+ db.foxx.swagger(missing_mount)
+ assert err.value.error_code == 3009
+
+ # Test download service
+ assert isinstance(db.foxx.download(service_mount), string_types)
+
+ # Test download missing service
+ with assert_raises(FoxxDownloadError) as err:
+ db.foxx.download(missing_mount)
+ assert err.value.error_code == 3009
+
+ # Test commit service state
+ assert db.foxx.commit(replace=True) is True
+ assert db.foxx.commit(replace=False) is True
+
+ # Test commit service state with bad database
+ with assert_raises(FoxxCommitError) as err:
+ bad_db.foxx.commit(replace=True)
+ assert err.value.error_code == 1228
+
+ # Test list service scripts
+ scripts = db.foxx.scripts(service_mount)
+ assert 'setup' in scripts
+ assert 'teardown' in scripts
+
+ # Test list missing service scripts
+ with assert_raises(FoxxScriptListError) as err:
+ db.foxx.scripts(missing_mount)
+ assert err.value.error_code == 3009
+
+ # Test run service script
+ assert db.foxx.run_script(service_mount, 'setup', []) == {}
+ assert db.foxx.run_script(service_mount, 'teardown', []) == {}
+
+ # Test run missing service script
+ with assert_raises(FoxxScriptRunError) as err:
+ db.foxx.run_script(service_mount, 'invalid', ())
+ assert err.value.error_code == 3016
+
+ # Test run tests on service
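+ # Each reporter/output_format pair yields a different payload: "suite" a
+ # JSON object, "stream" JSON arrays (one per line with x-ldjson), and
+ # "xunit" with "xml" an XML document.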
+ result_string = db.foxx.run_tests(
+ mount=service_mount,
+ reporter='suite',
+ idiomatic=True
+ )
+ result_json = json.loads(result_string)
+ assert 'stats' in result_json
+ assert 'tests' in result_json
+
+ result_string = db.foxx.run_tests(
+ mount=service_mount,
+ reporter='stream',
+ output_format='x-ldjson'
+ )
+ for result_part in result_string.split('\r\n'):
+ if len(result_part) == 0:
+ continue
+ assert result_part.startswith('[')
+ assert result_part.endswith(']')
+
+ result_string = db.foxx.run_tests(
+ mount=service_mount,
+ reporter='stream',
+ output_format='text'
+ )
+ assert result_string.startswith('[[')
+ assert result_string.endswith(']]')
+
+ result_string = db.foxx.run_tests(
+ mount=service_mount,
+ reporter='xunit',
+ output_format='xml'
+ )
+ assert result_string.strip().startswith('<')
+ assert result_string.strip().endswith('>')
+
+ # Test run tests on missing service
+ with assert_raises(FoxxTestRunError) as err:
+ db.foxx.run_tests(missing_mount)
+ assert err.value.error_code == 3009
diff --git a/tests/test_graph.py b/tests/test_graph.py
index 33f5577a..1c3d09d5 100644
--- a/tests/test_graph.py
+++ b/tests/test_graph.py
@@ -1,82 +1,54 @@
from __future__ import absolute_import, unicode_literals
-import pytest
from six import string_types
-from arango import ArangoClient
-from arango.collections import (
- EdgeCollection,
- VertexCollection
-)
-from arango.exceptions import *
-from .utils import (
- generate_db_name,
+from arango.collection import EdgeCollection
+from arango.exceptions import (
+ DocumentDeleteError,
+ DocumentGetError,
+ DocumentInsertError,
+ DocumentParseError,
+ DocumentReplaceError,
+ DocumentRevisionError,
+ DocumentUpdateError,
+ EdgeDefinitionListError,
+ EdgeDefinitionCreateError,
+ EdgeDefinitionDeleteError,
+ EdgeDefinitionReplaceError,
+ GraphListError,
+ GraphCreateError,
+ GraphDeleteError,
+ GraphPropertiesError,
+ GraphTraverseError,
+ VertexCollectionCreateError,
+ VertexCollectionDeleteError,
+ VertexCollectionListError,
+ EdgeListError,
+)
+from tests.helpers import (
+ assert_raises,
+ clean_doc,
+ extract,
generate_col_name,
generate_graph_name,
- clean_keys
+ generate_doc_key,
)
-arango_client = ArangoClient()
-db_name = generate_db_name()
-db = arango_client.create_database(db_name)
-col_name = generate_col_name()
-col = db.create_collection(col_name)
-graph_name = generate_graph_name()
-graph = db.create_graph(graph_name)
-bad_graph_name = generate_graph_name()
-bad_graph = db.graph(bad_graph_name)
-bad_col_name = generate_col_name()
-bad_vcol = bad_graph.vertex_collection(bad_col_name)
-bad_ecol = bad_graph.edge_collection(bad_col_name)
-
-# vertices in test vertex collection #1
-vertex1 = {'_key': '1', 'value': 1}
-vertex2 = {'_key': '2', 'value': 2}
-vertex3 = {'_key': '3', 'value': 3}
-
-# vertices in test vertex collection #2
-vertex4 = {'_key': '4', 'value': 4}
-vertex5 = {'_key': '5', 'value': 5}
-vertex6 = {'_key': '6', 'value': 6}
-
-# edges in test edge collection
-edge1 = {'_key': '1', '_from': 'vcol1/1', '_to': 'vcol3/4'} # valid
-edge2 = {'_key': '2', '_from': 'vcol1/1', '_to': 'vcol3/5'} # valid
-edge3 = {'_key': '3', '_from': 'vcol3/6', '_to': 'vcol1/2'} # invalid
-edge4 = {'_key': '4', '_from': 'vcol1/8', '_to': 'vcol3/7'} # missing
-
-# new edges that will be updated/replaced to
-edge5 = {'_key': '1', '_from': 'vcol1/1', '_to': 'vcol3/5'} # valid
-edge6 = {'_key': '1', '_from': 'vcol3/6', '_to': 'vcol1/2'} # invalid
-edge7 = {'_key': '1', '_from': 'vcol1/8', '_to': 'vcol3/7'} # missing
-
-
-def teardown_module(*_):
- arango_client.delete_database(db_name, ignore_missing=True)
-
-def setup_function(*_):
- col.truncate()
+def test_graph_properties(graph, bad_graph, db):
+ assert repr(graph) == '<Graph {}>'.format(graph.name)
-
-@pytest.mark.order1
-def test_properties():
- assert graph.name == graph_name
- assert repr(graph) == (
- "<ArangoDB graph '{}'>".format(graph_name)
- )
properties = graph.properties()
- assert properties['id'] == '_graphs/{}'.format(graph_name)
- assert properties['name'] == graph_name
- assert properties['edge_definitions'] == []
- assert properties['orphan_collections'] == []
- assert isinstance(properties['revision'], string_types)
- assert properties['smart'] == False
+ assert properties['id'] == '_graphs/{}'.format(graph.name)
+ assert properties['name'] == graph.name
+ assert len(properties['edge_definitions']) == 1
+ assert len(properties['orphan_collections']) == 2
+ assert 'smart' in properties
assert 'smart_field' in properties
assert 'shard_count' in properties
+ assert isinstance(properties['revision'], string_types)
- # Test if exception is raised properly
- with pytest.raises(GraphPropertiesError):
+ # Test properties with bad database
+ with assert_raises(GraphPropertiesError):
bad_graph.properties()
new_graph_name = generate_graph_name()
@@ -93,774 +65,754 @@ def test_properties():
assert properties['edge_definitions'] == []
assert properties['orphan_collections'] == []
assert isinstance(properties['revision'], string_types)
+
# TODO only possible with enterprise edition
- # assert properties['smart'] == True
+ # assert properties['smart'] is True
# assert properties['smart_field'] == 'foo'
# assert properties['shard_count'] == 2
-@pytest.mark.order2
-def test_create_vertex_collection():
- # Test preconditions
- assert graph.vertex_collections() == []
- vcol1 = graph.create_vertex_collection('vcol1')
- assert isinstance(vcol1, VertexCollection)
- assert vcol1.name == 'vcol1'
- assert vcol1.name in repr(vcol1)
- assert graph.name in repr(vcol1)
- assert graph.name == vcol1.graph_name
- assert graph.vertex_collections() == ['vcol1']
- assert graph.orphan_collections() == ['vcol1']
- assert 'vcol1' in set(c['name'] for c in db.collections())
+def test_graph_management(db, bad_db):
+ # Test create graph
+ graph_name = generate_graph_name()
+ assert db.has_graph(graph_name) is False
+
+ graph = db.create_graph(graph_name)
+ assert db.has_graph(graph_name) is True
+ assert graph.name == graph_name
+ assert graph.db_name == db.name
+
+ # Test create duplicate graph
+ with assert_raises(GraphCreateError) as err:
+ db.create_graph(graph_name)
+ assert err.value.error_code == 1925
+
+ # Test get graph
+ result = db.graph(graph_name)
+ assert result.name == graph.name
+ assert result.db_name == graph.db_name
+
+ # Test get graphs
+ result = db.graphs()
+ for entry in result:
+ assert 'revision' in entry
+ assert 'edge_definitions' in entry
+ assert 'orphan_collections' in entry
+ assert graph_name in extract('name', db.graphs())
+
+ # Test get graphs with bad database
+ with assert_raises(GraphListError) as err:
+ bad_db.graphs()
+ assert err.value.error_code == 1228
+
+ # Test delete graph
+ assert db.delete_graph(graph_name) is True
+ assert graph_name not in extract('name', db.graphs())
+
+ # Test delete missing graph
+ with assert_raises(GraphDeleteError) as err:
+ db.delete_graph(graph_name)
+ assert err.value.error_code == 1924
+ assert db.delete_graph(graph_name, ignore_missing=True) is False
+
+ # Create a graph with vertex and edge collections and delete the graph
+ graph = db.create_graph(graph_name)
+ ecol_name = generate_col_name()
+ fvcol_name = generate_col_name()
+ tvcol_name = generate_col_name()
+
+ graph.create_vertex_collection(fvcol_name)
+ graph.create_vertex_collection(tvcol_name)
+ graph.create_edge_definition(
+ edge_collection=ecol_name,
+ from_vertex_collections=[fvcol_name],
+ to_vertex_collections=[tvcol_name]
+ )
+ collections = extract('name', db.collections())
+ assert fvcol_name in collections
+ assert tvcol_name in collections
+ assert ecol_name in collections
+
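+ # Deleting the graph without "drop_collections" should leave the collections intact.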
+ db.delete_graph(graph_name)
+ collections = extract('name', db.collections())
+ assert fvcol_name in collections
+ assert tvcol_name in collections
+ assert ecol_name in collections
+
+ # Create a graph with vertex and edge collections and delete all
+ graph = db.create_graph(graph_name)
+ graph.create_edge_definition(
+ edge_collection=ecol_name,
+ from_vertex_collections=[fvcol_name],
+ to_vertex_collections=[tvcol_name]
+ )
+ db.delete_graph(graph_name, drop_collections=True)
+ collections = extract('name', db.collections())
+ assert fvcol_name not in collections
+ assert tvcol_name not in collections
+ assert ecol_name not in collections
+
+
+def test_vertex_collection_management(db, graph, bad_graph):
+ # Test create valid "from" vertex collection
+ fvcol_name = generate_col_name()
+ assert not graph.has_vertex_collection(fvcol_name)
+ assert not db.has_collection(fvcol_name)
+
+ fvcol = graph.create_vertex_collection(fvcol_name)
+ assert graph.has_vertex_collection(fvcol_name)
+ assert db.has_collection(fvcol_name)
+ assert fvcol.name == fvcol_name
+ assert fvcol.graph == graph.name
+ assert fvcol_name in repr(fvcol)
+ assert fvcol_name in graph.vertex_collections()
+ assert fvcol_name in extract('name', db.collections())
# Test create duplicate vertex collection
- with pytest.raises(VertexCollectionCreateError):
- graph.create_vertex_collection('vcol1')
- assert graph.vertex_collections() == ['vcol1']
- assert graph.orphan_collections() == ['vcol1']
- assert 'vcol1' in set(c['name'] for c in db.collections())
-
- # Test create valid vertex collection
- vcol2 = graph.create_vertex_collection('vcol2')
- assert isinstance(vcol2, VertexCollection)
- assert vcol2.name == 'vcol2'
- assert sorted(graph.vertex_collections()) == ['vcol1', 'vcol2']
- assert graph.orphan_collections() == ['vcol1', 'vcol2']
- assert 'vcol1' in set(c['name'] for c in db.collections())
- assert 'vcol2' in set(c['name'] for c in db.collections())
-
-
-@pytest.mark.order3
-def test_list_vertex_collections():
- assert graph.vertex_collections() == ['vcol1', 'vcol2']
-
- # Test if exception is raised properly
- with pytest.raises(VertexCollectionListError):
+ with assert_raises(VertexCollectionCreateError) as err:
+ graph.create_vertex_collection(fvcol_name)
+ assert err.value.error_code == 1938
+ assert fvcol_name in graph.vertex_collections()
+ assert fvcol_name in extract('name', db.collections())
+
+ # Test create valid "to" vertex collection
+ tvcol_name = generate_col_name()
+ assert not graph.has_vertex_collection(tvcol_name)
+ assert not db.has_collection(tvcol_name)
+
+ tvcol = graph.create_vertex_collection(tvcol_name)
+ assert graph.has_vertex_collection(tvcol_name)
+ assert db.has_collection(tvcol_name)
+ assert tvcol.name == tvcol_name
+ assert tvcol.graph == graph.name
+ assert tvcol_name in repr(tvcol)
+ assert tvcol_name in graph.vertex_collections()
+ assert tvcol_name in extract('name', db.collections())
+
+ # Test list vertex collection via bad database
+ with assert_raises(VertexCollectionListError) as err:
bad_graph.vertex_collections()
- with pytest.raises(OrphanCollectionListError):
- bad_graph.orphan_collections()
-
-
-@pytest.mark.order4
-def test_delete_vertex_collection():
- # Test preconditions
- assert sorted(graph.vertex_collections()) == ['vcol1', 'vcol2']
- assert graph.delete_vertex_collection('vcol1') is True
- assert graph.vertex_collections() == ['vcol2']
- assert 'vcol1' in set(c['name'] for c in db.collections())
+ assert err.value.error_code == 1228
# Test delete missing vertex collection
- with pytest.raises(VertexCollectionDeleteError):
- graph.delete_vertex_collection('vcol1')
-
- # Test delete vertex collection with purge option
- assert graph.delete_vertex_collection('vcol2', purge=True) is True
- assert graph.vertex_collections() == []
- assert 'vcol1' in set(c['name'] for c in db.collections())
- assert 'vcol2' not in set(c['name'] for c in db.collections())
-
-
-@pytest.mark.order5
-def test_create_edge_definition():
- # Test preconditions
- assert graph.edge_definitions() == []
-
- ecol1 = graph.create_edge_definition('ecol1', [], [])
- assert isinstance(ecol1, EdgeCollection)
- assert ecol1.name == 'ecol1'
- assert ecol1.name in repr(ecol1)
- assert graph.name in repr(ecol1)
- assert graph.name == ecol1.graph_name
-
- assert graph.edge_definitions() == [{
- 'name': 'ecol1',
- 'from_collections': [],
- 'to_collections': []
- }]
- assert 'ecol1' in set(c['name'] for c in db.collections())
+ with assert_raises(VertexCollectionDeleteError) as err:
+ graph.delete_vertex_collection(generate_col_name())
+ assert err.value.error_code == 1926
+
+ # Test delete "to" vertex collection with purge option
+ assert graph.delete_vertex_collection(tvcol_name, purge=True) is True
+ assert tvcol_name not in graph.vertex_collections()
+ assert fvcol_name in extract('name', db.collections())
+ assert tvcol_name not in extract('name', db.collections())
+ assert not graph.has_vertex_collection(tvcol_name)
+
+ # Test delete "from" vertex collection without purge option
+ assert graph.delete_vertex_collection(fvcol_name, purge=False) is True
+ assert fvcol_name not in graph.vertex_collections()
+ assert fvcol_name in extract('name', db.collections())
+ assert not graph.has_vertex_collection(fvcol_name)
+
+
+def test_edge_definition_management(db, graph, bad_graph):
+ ecol_name = generate_col_name()
+ assert not graph.has_edge_definition(ecol_name)
+ assert not graph.has_edge_collection(ecol_name)
+ assert not db.has_collection(ecol_name)
+
+ ecol = graph.create_edge_definition(ecol_name, [], [])
+ assert graph.has_edge_definition(ecol_name)
+ assert graph.has_edge_collection(ecol_name)
+ assert db.has_collection(ecol_name)
+ assert isinstance(ecol, EdgeCollection)
+
+ ecol = graph.edge_collection(ecol_name)
+ assert ecol.name == ecol_name
+ assert ecol.name in repr(ecol)
+ assert ecol.graph == graph.name
+ assert {
+ 'edge_collection': ecol_name,
+ 'from_vertex_collections': [],
+ 'to_vertex_collections': []
+ } in graph.edge_definitions()
+ assert ecol_name in extract('name', db.collections())
# Test create duplicate edge definition
- with pytest.raises(EdgeDefinitionCreateError):
- assert graph.create_edge_definition('ecol1', [], [])
- assert graph.edge_definitions() == [{
- 'name': 'ecol1',
- 'from_collections': [],
- 'to_collections': []
- }]
+ with assert_raises(EdgeDefinitionCreateError) as err:
+ graph.create_edge_definition(ecol_name, [], [])
+ assert err.value.error_code == 1920
# Test create edge definition with existing vertex collection
- vcol1 = graph.create_vertex_collection('vcol1')
- assert isinstance(vcol1, VertexCollection)
- assert vcol1.name == 'vcol1'
- vcol2 = graph.create_vertex_collection('vcol2')
- assert isinstance(vcol2, VertexCollection)
- assert vcol2.name == 'vcol2'
- ecol2 = graph.create_edge_definition(
- name='ecol2',
- from_collections=['vcol1'],
- to_collections=['vcol2']
+ fvcol_name = generate_col_name()
+ tvcol_name = generate_col_name()
+ ecol_name = generate_col_name()
+ ecol = graph.create_edge_definition(
+ edge_collection=ecol_name,
+ from_vertex_collections=[fvcol_name],
+ to_vertex_collections=[tvcol_name]
)
- assert isinstance(ecol1, EdgeCollection)
- assert ecol2.name == 'ecol2'
- assert graph.edge_definitions() == [
- {
- 'name': 'ecol1',
- 'from_collections': [],
- 'to_collections': []
- },
- {
- 'name': 'ecol2',
- 'from_collections': ['vcol1'],
- 'to_collections': ['vcol2']
- }
- ]
- assert 'ecol2' in set(c['name'] for c in db.collections())
+ assert ecol.name == ecol_name
+ assert {
+ 'edge_collection': ecol_name,
+ 'from_vertex_collections': [fvcol_name],
+ 'to_vertex_collections': [tvcol_name]
+ } in graph.edge_definitions()
+ assert ecol_name in extract('name', db.collections())
+
+ vertex_collections = graph.vertex_collections()
+ assert fvcol_name in vertex_collections
+ assert tvcol_name in vertex_collections
# Test create edge definition with missing vertex collection
- ecol3 = graph.create_edge_definition(
- name='ecol3',
- from_collections=['vcol3'],
- to_collections=['vcol3']
+ bad_vcol_name = generate_col_name()
+ ecol_name = generate_col_name()
+ ecol = graph.create_edge_definition(
+ edge_collection=ecol_name,
+ from_vertex_collections=[bad_vcol_name],
+ to_vertex_collections=[bad_vcol_name]
)
- assert isinstance(ecol3, EdgeCollection)
- assert ecol3.name == 'ecol3'
- assert graph.edge_definitions() == [
- {
- 'name': 'ecol1',
- 'from_collections': [],
- 'to_collections': []
- },
- {
- 'name': 'ecol2',
- 'from_collections': ['vcol1'],
- 'to_collections': ['vcol2']
- },
- {
- 'name': 'ecol3',
- 'from_collections': ['vcol3'],
- 'to_collections': ['vcol3']
- }
- ]
- assert 'vcol3' in graph.vertex_collections()
- assert 'vcol3' not in graph.orphan_collections()
- assert 'vcol3' in set(c['name'] for c in db.collections())
- assert 'ecol3' in set(c['name'] for c in db.collections())
-
-
-@pytest.mark.order6
-def test_list_edge_definitions():
- assert graph.edge_definitions() == [
- {
- 'name': 'ecol1',
- 'from_collections': [],
- 'to_collections': []
- },
- {
- 'name': 'ecol2',
- 'from_collections': ['vcol1'],
- 'to_collections': ['vcol2']
- },
- {
- 'name': 'ecol3',
- 'from_collections': ['vcol3'],
- 'to_collections': ['vcol3']
- }
- ]
-
- # Test if exception is raised properly
- with pytest.raises(EdgeDefinitionListError):
+ assert graph.has_edge_definition(ecol_name)
+ assert graph.has_edge_collection(ecol_name)
+ assert ecol.name == ecol_name
+ assert {
+ 'edge_collection': ecol_name,
+ 'from_vertex_collections': [bad_vcol_name],
+ 'to_vertex_collections': [bad_vcol_name]
+ } in graph.edge_definitions()
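+ # The referenced vertex collection is created automatically and added to the graph.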
+ assert bad_vcol_name in graph.vertex_collections()
+ assert bad_vcol_name in extract('name', db.collections())
+
+ # Test list edge definition with bad database
+ with assert_raises(EdgeDefinitionListError) as err:
bad_graph.edge_definitions()
+ assert err.value.error_code == 1228
-
-@pytest.mark.order7
-def test_replace_edge_definition():
- assert graph.replace_edge_definition(
- name='ecol1',
- from_collections=['vcol3'],
- to_collections=['vcol2']
- ) is True
- assert graph.orphan_collections() == ['vcol1']
- assert graph.edge_definitions() == [
- {
- 'name': 'ecol1',
- 'from_collections': ['vcol3'],
- 'to_collections': ['vcol2']
- },
- {
- 'name': 'ecol2',
- 'from_collections': ['vcol1'],
- 'to_collections': ['vcol2']
- },
- {
- 'name': 'ecol3',
- 'from_collections': ['vcol3'],
- 'to_collections': ['vcol3']
- }
- ]
- assert graph.replace_edge_definition(
- name='ecol2',
- from_collections=['vcol1'],
- to_collections=['vcol3']
- ) is True
- assert graph.orphan_collections() == []
- assert 'vcol3' not in graph.orphan_collections()
- assert graph.replace_edge_definition(
- name='ecol3',
- from_collections=['vcol4'],
- to_collections=['vcol4']
- ) is True
- with pytest.raises(EdgeDefinitionReplaceError):
+ # Test replace edge definition (happy path)
+ ecol = graph.replace_edge_definition(
+ edge_collection=ecol_name,
+ from_vertex_collections=[tvcol_name],
+ to_vertex_collections=[fvcol_name]
+ )
+ assert isinstance(ecol, EdgeCollection)
+ assert ecol.name == ecol_name
+ assert {
+ 'edge_collection': ecol_name,
+ 'from_vertex_collections': [tvcol_name],
+ 'to_vertex_collections': [fvcol_name]
+ } in graph.edge_definitions()
+
+ # Test replace missing edge definition
+ bad_ecol_name = generate_col_name()
+ with assert_raises(EdgeDefinitionReplaceError):
graph.replace_edge_definition(
- name='ecol4',
- from_collections=[],
- to_collections=['vcol1']
+ edge_collection=bad_ecol_name,
+ from_vertex_collections=[],
+ to_vertex_collections=[fvcol_name]
)
- assert graph.edge_definitions() == [
- {
- 'name': 'ecol1',
- 'from_collections': ['vcol3'],
- 'to_collections': ['vcol2']
- },
- {
- 'name': 'ecol2',
- 'from_collections': ['vcol1'],
- 'to_collections': ['vcol3']
- },
- {
- 'name': 'ecol3',
- 'from_collections': ['vcol4'],
- 'to_collections': ['vcol4']
- }
- ]
- assert graph.orphan_collections() == []
-
-
-@pytest.mark.order8
-def test_delete_edge_definition():
- assert graph.delete_edge_definition('ecol3') is True
- assert graph.edge_definitions() == [
- {
- 'name': 'ecol1',
- 'from_collections': ['vcol3'],
- 'to_collections': ['vcol2']
- },
- {
- 'name': 'ecol2',
- 'from_collections': ['vcol1'],
- 'to_collections': ['vcol3']
- }
- ]
- assert graph.orphan_collections() == ['vcol4']
- assert 'vcol4' in graph.vertex_collections()
- assert 'vcol4' in set(c['name'] for c in db.collections())
- assert 'ecol3' in set(c['name'] for c in db.collections())
-
- with pytest.raises(EdgeDefinitionDeleteError):
- graph.delete_edge_definition('ecol3')
-
- assert graph.delete_edge_definition('ecol1', purge=True) is True
- assert graph.edge_definitions() == [
- {
- 'name': 'ecol2',
- 'from_collections': ['vcol1'],
- 'to_collections': ['vcol3']
- }
- ]
- assert sorted(graph.orphan_collections()) == ['vcol2', 'vcol4']
- assert 'ecol1' not in set(c['name'] for c in db.collections())
- assert 'ecol2' in set(c['name'] for c in db.collections())
- assert 'ecol3' in set(c['name'] for c in db.collections())
-
-
-@pytest.mark.order9
-def test_create_graph_with_vertices_ane_edges():
- new_graph_name = generate_graph_name()
- edge_definitions = [
- {
- 'name': 'ecol1',
- 'from_collections': ['vcol3'],
- 'to_collections': ['vcol2']
- }
- ]
- new_graph = db.create_graph(
- new_graph_name,
- edge_definitions=edge_definitions,
- orphan_collections=['vcol1']
- )
- assert new_graph.edge_definitions() == edge_definitions
- assert new_graph.orphan_collections() == ['vcol1']
+ # Test delete missing edge definition
+ with assert_raises(EdgeDefinitionDeleteError) as err:
+ graph.delete_edge_definition(bad_ecol_name)
+ assert err.value.error_code == 1930
-@pytest.mark.order10
-def test_insert_vertex():
- vcol = graph.vertex_collection('vcol1')
+ # Test delete existing edge definition with purge
+ assert graph.delete_edge_definition(ecol_name, purge=True) is True
+ assert {
+ 'edge_collection': ecol_name,
+ 'from_vertex_collections': [tvcol_name],
+ 'to_vertex_collections': [fvcol_name]
+ } not in graph.edge_definitions()
+ assert ecol_name not in extract('name', db.collections())
+ assert not graph.has_edge_definition(ecol_name)
+ assert not graph.has_edge_collection(ecol_name)
- # Test preconditions
- assert '1' not in vcol
- assert len(vcol) == 0
- # Test insert first vertex
- result = vcol.insert(vertex1)
- assert result['_id'] == 'vcol1/1'
- assert result['_key'] == '1'
- assert isinstance(result['_rev'], string_types)
- assert '1' in vcol
- assert len(vcol) == 1
- assert vcol['1']['value'] == 1
-
- # Test insert vertex into missing collection
- with pytest.raises(DocumentInsertError):
- assert bad_vcol.insert(vertex2)
- assert '2' not in vcol
- assert len(vcol) == 1
+def test_create_graph_with_edge_definition(db):
+ new_graph_name = generate_graph_name()
+ new_ecol_name = generate_col_name()
+ fvcol_name = generate_col_name()
+ tvcol_name = generate_col_name()
+ ovcol_name = generate_col_name()
+
+ edge_definition = {
+ 'edge_collection': new_ecol_name,
+ 'from_vertex_collections': [fvcol_name],
+ 'to_vertex_collections': [tvcol_name]
+ }
+ new_graph = db.create_graph(
+ new_graph_name,
+ edge_definitions=[edge_definition],
+ orphan_collections=[ovcol_name]
+ )
+ assert edge_definition in new_graph.edge_definitions()
+
+
+def test_vertex_management(fvcol, bad_fvcol, fvdocs):
+ # Test insert vertex with no key
+ result = fvcol.insert({})
+ assert result['_key'] in fvcol
+ assert len(fvcol) == 1
+ fvcol.truncate()
+
+ # Test insert vertex with ID
+ vertex_id = fvcol.name + '/' + 'foo'
+ fvcol.insert({'_id': vertex_id})
+ assert 'foo' in fvcol
+ assert vertex_id in fvcol
+ assert len(fvcol) == 1
+ fvcol.truncate()
+
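+ # Inserting a vertex whose "_id" references a different collection should fail to parse.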
+ with assert_raises(DocumentParseError) as err:
+ fvcol.insert({'_id': generate_col_name() + '/' + 'foo'})
+ assert 'bad collection name' in err.value.message
+
+ vertex = fvdocs[0]
+ key = vertex['_key']
+
+ # Test insert first valid vertex
+ result = fvcol.insert(vertex)
+ assert result['_key'] == key
+ assert '_rev' in result
+ assert vertex in fvcol and key in fvcol
+ assert len(fvcol) == 1
+ assert fvcol[key]['val'] == vertex['val']
# Test insert duplicate vertex
- with pytest.raises(DocumentInsertError):
- assert vcol.insert(vertex1)
- assert len(vcol) == 1
+ with assert_raises(DocumentInsertError) as err:
+ fvcol.insert(vertex)
+ assert err.value.error_code == 1210
+ assert len(fvcol) == 1
- # Test insert second vertex
- result = vcol.insert(vertex2, sync=True)
- assert result['_id'] == 'vcol1/2'
- assert result['_key'] == '2'
- assert isinstance(result['_rev'], string_types)
- assert '2' in vcol
- assert len(vcol) == 2
- assert vcol['2']['value'] == 2
+ vertex = fvdocs[1]
+ key = vertex['_key']
- # Test insert duplicate vertex second time
- with pytest.raises(DocumentInsertError):
- assert vcol.insert(vertex2)
+ # Test insert second valid vertex
+ result = fvcol.insert(vertex, sync=True)
+ assert result['_key'] == key
+ assert vertex in fvcol and key in fvcol
+ assert len(fvcol) == 2
+ assert fvcol[key]['val'] == vertex['val']
+ vertex = fvdocs[2]
+ key = vertex['_key']
-@pytest.mark.order11
-def test_get_vertex():
- vcol = graph.vertex_collection('vcol1')
+ # Test insert third valid vertex with silent set to True
+ assert fvcol.insert(vertex, silent=True) is True
+ assert len(fvcol) == 3
+ assert fvcol[key]['val'] == vertex['val']
# Test get missing vertex
- assert vcol.get('0') is None
+ assert fvcol.get(generate_doc_key()) is None
- # Test get existing vertex
- result = vcol.get('1')
- old_rev = result['_rev']
- assert clean_keys(result) == {'_key': '1', 'value': 1}
+ # Test get existing edge by body with "_key" field
+ result = fvcol.get({'_key': key})
+ assert clean_doc(result) == vertex
- # Test get existing vertex with wrong revision
- with pytest.raises(ArangoError):
- vcol.get('1', rev=old_rev + '1')
+ # Test get existing edge by body with "_id" field
+ result = fvcol.get({'_id': fvcol.name + '/' + key})
+ assert clean_doc(result) == vertex
- # Test get existing vertex from missing vertex collection
- with pytest.raises(DocumentGetError):
- bad_vcol.get('1')
+ # Test get existing vertex by key
+ result = fvcol.get(key)
+ assert clean_doc(result) == vertex
- # Test get existing vertex again
- assert clean_keys(vcol.get('2')) == {'_key': '2', 'value': 2}
+ # Test get existing vertex by ID
+ result = fvcol.get(fvcol.name + '/' + key)
+ assert clean_doc(result) == vertex
+ # Test get existing vertex with bad revision
+ old_rev = result['_rev']
+ with assert_raises(DocumentRevisionError) as err:
+ fvcol.get(key, rev=old_rev + '1', check_rev=True)
+ assert err.value.error_code == 1903
-@pytest.mark.order12
-def test_update_vertex():
- vcol = graph.vertex_collection('vcol1')
+ # Test get existing vertex with bad database
+ with assert_raises(DocumentGetError) as err:
+ bad_fvcol.get(key)
+ assert err.value.error_code == 1228
# Test update vertex with a single field change
- assert 'foo' not in vcol.get('1')
- result = vcol.update({'_key': '1', 'foo': 100})
- assert result['_id'] == 'vcol1/1'
- assert result['_key'] == '1'
- assert vcol['1']['foo'] == 100
- old_rev = vcol['1']['_rev']
+ assert 'foo' not in fvcol.get(key)
+ result = fvcol.update({'_key': key, 'foo': 100})
+ assert result['_key'] == key
+ assert fvcol[key]['foo'] == 100
+ old_rev = fvcol[key]['_rev']
+
+ # Test update vertex with silent set to True
+ assert 'bar' not in fvcol[vertex]
+ assert fvcol.update({'_key': key, 'bar': 200}, silent=True) is True
+ assert fvcol[vertex]['bar'] == 200
+ assert fvcol[vertex]['_rev'] != old_rev
+ old_rev = fvcol[key]['_rev']
# Test update vertex with multiple field changes
- result = vcol.update({'_key': '1', 'foo': 200, 'bar': 300})
- assert result['_id'] == 'vcol1/1'
- assert result['_key'] == '1'
+ result = fvcol.update({'_key': key, 'foo': 200, 'bar': 300})
+ assert result['_key'] == key
assert result['_old_rev'] == old_rev
- assert vcol['1']['foo'] == 200
- assert vcol['1']['bar'] == 300
+ assert fvcol[key]['foo'] == 200
+ assert fvcol[key]['bar'] == 300
old_rev = result['_rev']
# Test update vertex with correct revision
- result = vcol.update({'_key': '1', '_rev': old_rev, 'bar': 400})
- assert result['_id'] == 'vcol1/1'
- assert result['_key'] == '1'
+ result = fvcol.update({'_key': key, '_rev': old_rev, 'bar': 400})
+ assert result['_key'] == key
assert result['_old_rev'] == old_rev
- assert vcol['1']['foo'] == 200
- assert vcol['1']['bar'] == 400
+ assert fvcol[key]['foo'] == 200
+ assert fvcol[key]['bar'] == 400
old_rev = result['_rev']
- # Test update vertex with incorrect revision
+ # Test update vertex with bad revision
new_rev = old_rev + '1'
- with pytest.raises(DocumentRevisionError):
- vcol.update({'_key': '1', '_rev': new_rev, 'bar': 500})
- assert vcol['1']['foo'] == 200
- assert vcol['1']['bar'] == 400
+ with assert_raises(DocumentRevisionError) as err:
+ fvcol.update({'_key': key, '_rev': new_rev, 'bar': 500})
+ assert err.value.error_code == 1903
+ assert fvcol[key]['foo'] == 200
+ assert fvcol[key]['bar'] == 400
- # Test update vertex in missing vertex collection
+ # Test update vertex with bad database
- with pytest.raises(DocumentUpdateError):
- bad_vcol.update({'_key': '1', 'bar': 500})
- assert vcol['1']['foo'] == 200
- assert vcol['1']['bar'] == 400
-
- # Test update vertex with sync option
- result = vcol.update({'_key': '1', 'bar': 500}, sync=True)
- assert result['_id'] == 'vcol1/1'
- assert result['_key'] == '1'
+ with assert_raises(DocumentUpdateError) as err:
+ bad_fvcol.update({'_key': key, 'bar': 500})
+ assert err.value.error_code == 1228
+ assert fvcol[key]['foo'] == 200
+ assert fvcol[key]['bar'] == 400
+
+ # Test update vertex with sync set to True
+ result = fvcol.update({'_key': key, 'bar': 500}, sync=True)
+ assert result['_key'] == key
assert result['_old_rev'] == old_rev
- assert vcol['1']['foo'] == 200
- assert vcol['1']['bar'] == 500
+ assert fvcol[key]['foo'] == 200
+ assert fvcol[key]['bar'] == 500
old_rev = result['_rev']
- # Test update vertex with keep_none option
- result = vcol.update({'_key': '1', 'bar': None}, keep_none=True)
- assert result['_id'] == 'vcol1/1'
- assert result['_key'] == '1'
+ # Test update vertex with keep_none set to True
+ result = fvcol.update({'_key': key, 'bar': None}, keep_none=True)
+ assert result['_key'] == key
assert result['_old_rev'] == old_rev
- assert vcol['1']['foo'] == 200
- assert vcol['1']['bar'] is None
+ assert fvcol[key]['foo'] == 200
+ assert fvcol[key]['bar'] is None
old_rev = result['_rev']
- # Test update vertex without keep_none option
- result = vcol.update({'_key': '1', 'foo': None}, keep_none=False)
- assert result['_id'] == 'vcol1/1'
- assert result['_key'] == '1'
+ # Test update vertex with keep_none set to False
+ result = fvcol.update({'_key': key, 'foo': None}, keep_none=False)
+ assert result['_key'] == key
assert result['_old_rev'] == old_rev
- assert 'foo' not in vcol['1']
- assert vcol['1']['bar'] is None
-
-
-@pytest.mark.order13
-def test_replace_vertex():
- vcol = graph.vertex_collection('vcol1')
-
- # Test preconditions
- assert 'bar' in vcol.get('1')
- assert 'value' in vcol.get('1')
+ assert 'foo' not in fvcol[key]
+ assert fvcol[key]['bar'] is None
# Test replace vertex with a single field change
- result = vcol.replace({'_key': '1', 'baz': 100})
- assert result['_id'] == 'vcol1/1'
- assert result['_key'] == '1'
- assert 'foo' not in vcol['1']
- assert 'bar' not in vcol['1']
- assert vcol['1']['baz'] == 100
+ result = fvcol.replace({'_key': key, 'baz': 100})
+ assert result['_key'] == key
+ assert 'foo' not in fvcol[key]
+ assert 'bar' not in fvcol[key]
+ assert fvcol[key]['baz'] == 100
old_rev = result['_rev']
+ # Test replace vertex with silent set to True
+ assert fvcol.replace({'_key': key, 'bar': 200}, silent=True) is True
+ assert 'foo' not in fvcol[key]
+ assert 'baz' not in fvcol[vertex]
+ assert fvcol[vertex]['bar'] == 200
+ assert len(fvcol) == 3
+ assert fvcol[vertex]['_rev'] != old_rev
+ old_rev = fvcol[vertex]['_rev']
+
# Test replace vertex with multiple field changes
- vertex = {'_key': '1', 'foo': 200, 'bar': 300}
- result = vcol.replace(vertex)
- assert result['_id'] == 'vcol1/1'
- assert result['_key'] == '1'
+ vertex = {'_key': key, 'foo': 200, 'bar': 300}
+ result = fvcol.replace(vertex)
+ assert result['_key'] == key
assert result['_old_rev'] == old_rev
- assert clean_keys(vcol['1']) == vertex
+ assert clean_doc(fvcol[key]) == vertex
old_rev = result['_rev']
# Test replace vertex with correct revision
- vertex = {'_key': '1', '_rev': old_rev, 'bar': 500}
- result = vcol.replace(vertex)
- assert result['_id'] == 'vcol1/1'
- assert result['_key'] == '1'
+ vertex = {'_key': key, '_rev': old_rev, 'bar': 500}
+ result = fvcol.replace(vertex)
+ assert result['_key'] == key
assert result['_old_rev'] == old_rev
- assert clean_keys(vcol['1']) == clean_keys(vertex)
+ assert clean_doc(fvcol[key]) == clean_doc(vertex)
old_rev = result['_rev']
- # Test replace vertex with incorrect revision
+ # Test replace vertex with bad revision
new_rev = old_rev + '10'
- vertex = {'_key': '1', '_rev': new_rev, 'bar': 600}
- with pytest.raises(DocumentRevisionError):
- vcol.replace(vertex)
- assert vcol['1']['bar'] == 500
- assert 'foo' not in vcol['1']
-
- # Test replace vertex in missing vertex collection
- with pytest.raises(DocumentReplaceError):
- bad_vcol.replace({'_key': '1', 'bar': 600})
- assert vcol['1']['bar'] == 500
- assert 'foo' not in vcol['1']
-
- # Test replace vertex with sync option
- vertex = {'_key': '1', 'bar': 400, 'foo': 200}
- result = vcol.replace(vertex, sync=True)
- assert result['_id'] == 'vcol1/1'
- assert result['_key'] == '1'
+ vertex = {'_key': key, '_rev': new_rev, 'bar': 600}
+ with assert_raises(DocumentRevisionError) as err:
+ fvcol.replace(vertex)
+ assert err.value.error_code == 1903
+ assert fvcol[key]['bar'] == 500
+ assert 'foo' not in fvcol[key]
+
+ # Test replace vertex with bad database
+ with assert_raises(DocumentReplaceError) as err:
+ bad_fvcol.replace({'_key': key, 'bar': 600})
+ assert err.value.error_code == 1228
+ assert fvcol[key]['bar'] == 500
+ assert 'foo' not in fvcol[key]
+
+ # Test replace vertex with sync set to True
+ vertex = {'_key': key, 'bar': 400, 'foo': 200}
+ result = fvcol.replace(vertex, sync=True)
+ assert result['_key'] == key
assert result['_old_rev'] == old_rev
- assert vcol['1']['foo'] == 200
- assert vcol['1']['bar'] == 400
-
-
-@pytest.mark.order14
-def test_delete_vertex():
- vcol = graph.vertex_collection('vcol1')
- vcol.truncate()
-
- vcol.insert(vertex1)
- vcol.insert(vertex2)
- vcol.insert(vertex3)
-
- # Test delete existing vertex
- assert vcol.delete(vertex1) is True
- assert vcol['1'] is None
- assert '1' not in vcol
-
- # Test delete existing vertex with sync
- assert vcol.delete(vertex3, sync=True) is True
- assert vcol['3'] is None
- assert '3' not in vcol
-
- # Test delete vertex with incorrect revision
- old_rev = vcol['2']['_rev']
- vertex2['_rev'] = old_rev + '10'
- with pytest.raises(DocumentRevisionError):
- vcol.delete(vertex2)
- assert '2' in vcol
-
- with pytest.raises(DocumentDeleteError):
- bad_vcol.delete({'_key': '10', '_rev': 'boo'}, ignore_missing=True)
- assert '2' in vcol
-
- # Test delete vertex from missing collection
- with pytest.raises(DocumentDeleteError):
- bad_vcol.delete(vertex1, ignore_missing=False)
+ assert fvcol[key]['foo'] == 200
+ assert fvcol[key]['bar'] == 400
+
+ # Test delete vertex with bad revision
+ old_rev = fvcol[key]['_rev']
+ vertex['_rev'] = old_rev + '1'
+ with assert_raises(DocumentRevisionError) as err:
+ fvcol.delete(vertex, check_rev=True)
+ assert err.value.error_code == 1903
+ vertex['_rev'] = old_rev
+ assert vertex in fvcol
# Test delete missing vertex
- with pytest.raises(DocumentDeleteError):
- vcol.delete({'_key': '10'}, ignore_missing=False)
-
- # Test delete missing vertex while ignoring missing
- assert vcol.delete({'_key': '10'}, ignore_missing=True) is False
-
-
-@pytest.mark.order15
-def test_insert_edge():
- ecol = graph.edge_collection('ecol2')
+ bad_key = generate_doc_key()
+ with assert_raises(DocumentDeleteError) as err:
+ fvcol.delete(bad_key, ignore_missing=False)
+ assert err.value.error_code == 1202
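+ # Skip the return value check in transactions, where results are not returned immediately.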
+ if fvcol.context != 'transaction':
+ assert fvcol.delete(bad_key, ignore_missing=True) is False
+
+ # Test delete existing vertex with sync set to True
+ assert fvcol.delete(vertex, sync=True, check_rev=False) is True
+ assert fvcol[vertex] is None
+ assert vertex not in fvcol
+ assert len(fvcol) == 2
+ fvcol.truncate()
+
+
+def test_vertex_management_via_graph(graph, fvcol):
+ # Test insert vertex via graph object
+ result = graph.insert_vertex(fvcol.name, {})
+ assert result['_key'] in fvcol
+ assert len(fvcol) == 1
+ vertex_id = result['_id']
+
+ # Test get vertex via graph object
+ assert graph.vertex(vertex_id)['_id'] == vertex_id
+
+ # Test update vertex via graph object
+ result = graph.update_vertex({'_id': vertex_id, 'foo': 100})
+ assert result['_id'] == vertex_id
+ assert fvcol[vertex_id]['foo'] == 100
+
+ # Test replace vertex via graph object
+ result = graph.replace_vertex({'_id': vertex_id, 'bar': 200})
+ assert result['_id'] == vertex_id
+ assert 'foo' not in fvcol[vertex_id]
+ assert fvcol[vertex_id]['bar'] == 200
+
+ # Test delete vertex via graph object
+ assert graph.delete_vertex(vertex_id) is True
+ assert vertex_id not in fvcol
+ assert len(fvcol) == 0
+
+
+def test_edge_management(ecol, bad_ecol, edocs, fvcol, fvdocs, tvcol, tvdocs):
+ for vertex in fvdocs:
+ fvcol.insert(vertex)
+ for vertex in tvdocs:
+ tvcol.insert(vertex)
+
+ edge = edocs[0]
+ key = edge['_key']
+
+ # Test insert edge with no key
+ result = ecol.insert({'_from': edge['_from'], '_to': edge['_to']})
+ assert result['_key'] in ecol
+ assert len(ecol) == 1
ecol.truncate()
- vcol1 = db.collection('vcol1')
- vcol1.truncate()
- vcol1.import_bulk([vertex1, vertex2, vertex3])
-
- vcol3 = db.collection('vcol3')
- vcol3.truncate()
- vcol3.import_bulk([vertex4, vertex5, vertex6])
+ # Test insert edge with ID
+ edge_id = ecol.name + '/' + 'foo'
+ ecol.insert({
+ '_id': edge_id,
+ '_from': edge['_from'],
+ '_to': edge['_to']
+ })
+ assert 'foo' in ecol
+ assert edge_id in ecol
+ assert len(ecol) == 1
+ ecol.truncate()
- # Test preconditions
- assert '1' not in ecol
- assert len(ecol) == 0
- assert len(vcol1) == 3
- assert len(vcol3) == 3
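+ # As with vertices, an "_id" referencing a different collection should be rejected.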
+ with assert_raises(DocumentParseError) as err:
+ ecol.insert({
+ '_id': generate_col_name() + '/' + 'foo',
+ '_from': edge['_from'],
+ '_to': edge['_to']
+ })
+ assert 'bad collection name' in err.value.message
# Test insert first valid edge
- result = ecol.insert(edge1)
- assert result['_id'] == 'ecol2/1'
- assert result['_key'] == '1'
- assert isinstance(result['_rev'], string_types)
- assert '1' in ecol
- assert len(ecol) == 1
- assert ecol['1']['_from'] == 'vcol1/1'
- assert ecol['1']['_to'] == 'vcol3/4'
-
- # Test insert valid edge into missing collection
- with pytest.raises(DocumentInsertError):
- assert bad_ecol.insert(edge2)
- assert '2' not in ecol
+ result = ecol.insert(edge)
+ assert result['_key'] == key
+ assert '_rev' in result
+ assert edge in ecol and key in ecol
assert len(ecol) == 1
+ assert ecol[key]['_from'] == edge['_from']
+ assert ecol[key]['_to'] == edge['_to']
# Test insert duplicate edge
- with pytest.raises(DocumentInsertError):
- assert ecol.insert(edge1)
+ with assert_raises(DocumentInsertError) as err:
+ assert ecol.insert(edge)
+ assert err.value.error_code == 1906
assert len(ecol) == 1
- # Test insert second valid edge
- result = ecol.insert(edge2, sync=True)
- assert result['_id'] == 'ecol2/2'
- assert result['_key'] == '2'
- assert '2' in ecol
- assert len(ecol) == 2
- assert ecol['2']['_from'] == 'vcol1/1'
- assert ecol['2']['_to'] == 'vcol3/5'
- old_rev = result['_rev']
+ edge = edocs[1]
+ key = edge['_key']
- # Test insert duplicate edge second time
- with pytest.raises(DocumentInsertError):
- assert ecol.insert(edge2)
- assert ecol['2']['_from'] == 'vcol1/1'
- assert ecol['2']['_to'] == 'vcol3/5'
- assert ecol['2']['_rev'] == old_rev
-
- # Test insert invalid edge (from and to mixed up)
- with pytest.raises(DocumentInsertError):
- ecol.insert(edge3)
- assert ecol['2']['_from'] == 'vcol1/1'
- assert ecol['2']['_to'] == 'vcol3/5'
- assert ecol['2']['_rev'] == old_rev
-
- # Test insert invalid edge (missing vertices)
- result = ecol.insert(edge4)
- assert result['_id'] == 'ecol2/4'
- assert result['_key'] == '4'
- assert isinstance(result['_rev'], string_types)
- assert '4' in ecol
+ # Test insert second valid edge with silent set to True
+ assert ecol.insert(edge, sync=True, silent=True) is True
+ assert edge in ecol and key in ecol
+ assert len(ecol) == 2
+ assert ecol[key]['_from'] == edge['_from']
+ assert ecol[key]['_to'] == edge['_to']
+
+ # Test insert third valid edge using link method
+ from_vertex = fvcol.get(fvdocs[2])
+ to_vertex = tvcol.get(tvdocs[2])
+ result = ecol.link(from_vertex, to_vertex, sync=False)
+ assert result['_key'] in ecol
assert len(ecol) == 3
- assert ecol['4']['_from'] == 'vcol1/8'
- assert ecol['4']['_to'] == 'vcol3/7'
- assert len(vcol1) == 3
- assert len(vcol3) == 3
- assert '4' not in vcol1
- assert 'd' not in vcol3
+ # Test insert fourth valid edge using link method
+ from_vertex = fvcol.get(fvdocs[2])
+ to_vertex = tvcol.get(tvdocs[0])
+ assert ecol.link(
+ from_vertex['_id'],
+ to_vertex['_id'],
+ {'_id': ecol.name + '/foo'},
+ sync=True,
+ silent=True
+ ) is True
+ assert 'foo' in ecol
+ assert len(ecol) == 4
-@pytest.mark.order16
-def test_get_edge():
- ecol = graph.edge_collection('ecol2')
- ecol.truncate()
- for edge in [edge1, edge2, edge4]:
- ecol.insert(edge)
+ with assert_raises(DocumentParseError) as err:
+ assert ecol.link({}, {})
+ assert err.value.message == 'field "_id" required'
- # Test get missing edge
- assert ecol.get('0') is None
+ # Test get missing edge
+ bad_document_key = generate_doc_key()
+ assert ecol.get(bad_document_key) is None
- # Test get existing edge
- result = ecol.get('1')
- old_rev = result['_rev']
- assert clean_keys(result) == edge1
+ # Test get existing edge by body with "_key" field
+ result = ecol.get({'_key': key})
+ assert clean_doc(result) == edge
- # Test get existing edge with wrong revision
- with pytest.raises(DocumentRevisionError):
- ecol.get('1', rev=old_rev + '1')
+ # Test get existing edge by body with "_id" field
+ result = ecol.get({'_id': ecol.name + '/' + key})
+ assert clean_doc(result) == edge
- # Test get existing edge from missing edge collection
- with pytest.raises(DocumentGetError):
- bad_ecol.get('1')
+ # Test get existing edge by key
+ result = ecol.get(key)
+ assert clean_doc(result) == edge
- # Test get existing edge again
- assert clean_keys(ecol.get('2')) == edge2
+ # Test get existing edge by ID
+ result = ecol.get(ecol.name + '/' + key)
+ assert clean_doc(result) == edge
+ # Test get existing edge with bad revision
+ old_rev = result['_rev']
+ with assert_raises(DocumentRevisionError) as err:
+ ecol.get(key, rev=old_rev + '1')
+ assert err.value.error_code == 1903
-@pytest.mark.order17
-def test_update_edge():
- ecol = graph.edge_collection('ecol2')
- ecol.truncate()
- ecol.insert(edge1)
+ # Test get existing edge with bad database
+ with assert_raises(DocumentGetError) as err:
+ bad_ecol.get(key)
+ assert err.value.error_code == 1228
# Test update edge with a single field change
- assert 'foo' not in ecol.get('1')
- result = ecol.update({'_key': '1', 'foo': 100})
- assert result['_id'] == 'ecol2/1'
- assert result['_key'] == '1'
- assert ecol['1']['foo'] == 100
- old_rev = ecol['1']['_rev']
+ assert 'foo' not in ecol.get(key)
+ result = ecol.update({'_key': key, 'foo': 100})
+ assert result['_key'] == key
+ assert ecol[key]['foo'] == 100
+ old_rev = ecol[key]['_rev']
# Test update edge with multiple field changes
- result = ecol.update({'_key': '1', 'foo': 200, 'bar': 300})
- assert result['_id'] == 'ecol2/1'
- assert result['_key'] == '1'
+ result = ecol.update({'_key': key, 'foo': 200, 'bar': 300})
+ assert result['_key'] == key
assert result['_old_rev'] == old_rev
- assert ecol['1']['foo'] == 200
- assert ecol['1']['bar'] == 300
+ assert ecol[key]['foo'] == 200
+ assert ecol[key]['bar'] == 300
old_rev = result['_rev']
# Test update edge with correct revision
- result = ecol.update({'_key': '1', '_rev': old_rev, 'bar': 400})
- assert result['_id'] == 'ecol2/1'
- assert result['_key'] == '1'
+ result = ecol.update({'_key': key, '_rev': old_rev, 'bar': 400})
+ assert result['_key'] == key
assert result['_old_rev'] == old_rev
- assert ecol['1']['foo'] == 200
- assert ecol['1']['bar'] == 400
+ assert ecol[key]['foo'] == 200
+ assert ecol[key]['bar'] == 400
old_rev = result['_rev']
- # Test update edge with incorrect revision
+ # Test update edge with bad revision
new_rev = old_rev + '1'
- with pytest.raises(DocumentRevisionError):
- ecol.update({'_key': '1', '_rev': new_rev, 'bar': 500})
- assert ecol['1']['foo'] == 200
- assert ecol['1']['bar'] == 400
+ with assert_raises(DocumentRevisionError):
+ ecol.update({'_key': key, '_rev': new_rev, 'bar': 500})
+ assert ecol[key]['foo'] == 200
+ assert ecol[key]['bar'] == 400
- # Test update edge in missing edge collection
+ # Test update edge with bad database
- with pytest.raises(DocumentUpdateError):
- bad_ecol.update({'_key': '1', 'bar': 500})
- assert ecol['1']['foo'] == 200
- assert ecol['1']['bar'] == 400
+ with assert_raises(DocumentUpdateError) as err:
+ bad_ecol.update({'_key': key, 'bar': 500})
+ assert err.value.error_code == 1228
+ assert ecol[key]['foo'] == 200
+ assert ecol[key]['bar'] == 400
# Test update edge with sync option
- result = ecol.update({'_key': '1', 'bar': 500}, sync=True)
- assert result['_id'] == 'ecol2/1'
- assert result['_key'] == '1'
+ result = ecol.update({'_key': key, 'bar': 500}, sync=True)
+ assert result['_key'] == key
assert result['_old_rev'] == old_rev
- assert ecol['1']['foo'] == 200
- assert ecol['1']['bar'] == 500
+ assert ecol[key]['foo'] == 200
+ assert ecol[key]['bar'] == 500
old_rev = result['_rev']
+ # Test update edge with silent set to True
+ assert ecol.update({'_key': key, 'bar': 600}, silent=True) is True
+ assert ecol[key]['foo'] == 200
+ assert ecol[key]['bar'] == 600
+ assert ecol[key]['_rev'] != old_rev
+ old_rev = ecol[key]['_rev']
+
- # Test update edge without keep_none option
+ # Test update edge with keep_none set to True
- result = ecol.update({'_key': '1', 'bar': None}, keep_none=True)
- assert result['_id'] == 'ecol2/1'
- assert result['_key'] == '1'
+ result = ecol.update({'_key': key, 'bar': None}, keep_none=True)
+ assert result['_key'] == key
assert result['_old_rev'] == old_rev
- assert ecol['1']['foo'] == 200
- assert ecol['1']['bar'] is None
+ assert ecol[key]['foo'] == 200
+ assert ecol[key]['bar'] is None
old_rev = result['_rev']
- # Test update edge with keep_none option
+ # Test update edge with keep_none set to False
- result = ecol.update({'_key': '1', 'foo': None}, keep_none=False)
- assert result['_id'] == 'ecol2/1'
- assert result['_key'] == '1'
+ result = ecol.update({'_key': key, 'foo': None}, keep_none=False)
+ assert result['_key'] == key
assert result['_old_rev'] == old_rev
- assert 'foo' not in ecol['1']
- assert ecol['1']['bar'] is None
- old_rev = result['_rev']
-
- # Test update edge to a valid edge
- result = ecol.update(edge5)
- assert result['_id'] == 'ecol2/1'
- assert result['_key'] == '1'
- assert result['_old_rev'] == old_rev
- assert ecol['1']['_from'] == 'vcol1/1'
- assert ecol['1']['_to'] == 'vcol3/5'
- old_rev = result['_rev']
-
- # Test update edge to a missing edge
- result = ecol.update(edge7)
- assert result['_id'] == 'ecol2/1'
- assert result['_key'] == '1'
- assert result['_old_rev'] == old_rev
- assert ecol['1']['_from'] == 'vcol1/8'
- assert ecol['1']['_to'] == 'vcol3/7'
- old_rev = result['_rev']
-
- # TODO why is this succeeding?
- # Test update edge to a invalid edge (from and to mixed up)
- result = ecol.update(edge6)
- assert result['_id'] == 'ecol2/1'
- assert result['_key'] == '1'
- assert result['_old_rev'] == old_rev
- assert ecol['1']['_from'] == 'vcol3/6'
- assert ecol['1']['_to'] == 'vcol1/2'
- assert ecol['1']['_rev'] != old_rev
-
-
-@pytest.mark.order18
-def test_replace_edge():
- ecol = graph.edge_collection('ecol2')
- ecol.truncate()
- ecol.insert(edge1)
-
- edge = edge1.copy()
+ assert 'foo' not in ecol[key]
+ assert ecol[key]['bar'] is None
# Test replace edge with a single field change
- assert 'foo' not in ecol.get('1')
edge['foo'] = 100
result = ecol.replace(edge)
- assert result['_id'] == 'ecol2/1'
- assert result['_key'] == '1'
- assert ecol['1']['foo'] == 100
- old_rev = ecol['1']['_rev']
+ assert result['_key'] == key
+ assert ecol[key]['foo'] == 100
+ old_rev = ecol[key]['_rev']
+
+ # Test replace edge with silent set to True
+ edge['bar'] = 200
+ assert ecol.replace(edge, silent=True) is True
+ assert ecol[key]['foo'] == 100
+ assert ecol[key]['bar'] == 200
+ assert ecol[key]['_rev'] != old_rev
+ old_rev = ecol[key]['_rev']
# Test replace edge with multiple field changes
edge['foo'] = 200
edge['bar'] = 300
result = ecol.replace(edge)
- assert result['_id'] == 'ecol2/1'
- assert result['_key'] == '1'
+ assert result['_key'] == key
assert result['_old_rev'] == old_rev
- assert ecol['1']['foo'] == 200
- assert ecol['1']['bar'] == 300
+ assert ecol[key]['foo'] == 200
+ assert ecol[key]['bar'] == 300
old_rev = result['_rev']
# Test replace edge with correct revision
@@ -868,116 +820,199 @@ def test_replace_edge():
edge['bar'] = 400
edge['_rev'] = old_rev
result = ecol.replace(edge)
- assert result['_id'] == 'ecol2/1'
- assert result['_key'] == '1'
+ assert result['_key'] == key
assert result['_old_rev'] == old_rev
- assert ecol['1']['foo'] == 300
- assert ecol['1']['bar'] == 400
+ assert ecol[key]['foo'] == 300
+ assert ecol[key]['bar'] == 400
old_rev = result['_rev']
- # Test replace edge with incorrect revision
+ # Test replace edge with bad revision
edge['bar'] = 500
- edge['_rev'] = old_rev + '1'
- with pytest.raises(DocumentRevisionError):
+ edge['_rev'] = old_rev + key
+ with assert_raises(DocumentRevisionError) as err:
ecol.replace(edge)
- assert ecol['1']['foo'] == 300
- assert ecol['1']['bar'] == 400
+ assert err.value.error_code == 1903
+ assert ecol[key]['foo'] == 300
+ assert ecol[key]['bar'] == 400
- # Test replace edge in missing edge collection
- with pytest.raises(DocumentReplaceError):
+ # Test replace edge with bad database
+ with assert_raises(DocumentReplaceError) as err:
bad_ecol.replace(edge)
- assert ecol['1']['foo'] == 300
- assert ecol['1']['bar'] == 400
+ assert err.value.error_code == 1228
+ assert ecol[key]['foo'] == 300
+ assert ecol[key]['bar'] == 400
# Test replace edge with sync option
- edge['_rev'] = None
- result = ecol.replace(edge, sync=True)
- assert result['_id'] == 'ecol2/1'
- assert result['_key'] == '1'
+ result = ecol.replace(edge, sync=True, check_rev=False)
+ assert result['_key'] == key
assert result['_old_rev'] == old_rev
- assert ecol['1']['foo'] == 300
- assert ecol['1']['bar'] == 500
- old_rev = result['_rev']
+ assert ecol[key]['foo'] == 300
+ assert ecol[key]['bar'] == 500
- # Test replace edge to a valid edge
- result = ecol.replace(edge5)
- assert result['_id'] == 'ecol2/1'
- assert result['_key'] == '1'
- assert result['_old_rev'] == old_rev
- assert ecol['1']['_from'] == 'vcol1/1'
- assert ecol['1']['_to'] == 'vcol3/5'
- old_rev = result['_rev']
+ # Test delete edge with bad revision
+ old_rev = ecol[key]['_rev']
+ edge['_rev'] = old_rev + '1'
+ with assert_raises(DocumentRevisionError) as err:
+ ecol.delete(edge, check_rev=True)
+ assert err.value.error_code == 1903
+ edge['_rev'] = old_rev
+ assert edge in ecol
- # Test replace edge to a missing edge
- result = ecol.replace(edge7)
- assert result['_id'] == 'ecol2/1'
- assert result['_key'] == '1'
- assert result['_old_rev'] == old_rev
- assert ecol['1']['_from'] == 'vcol1/8'
- assert ecol['1']['_to'] == 'vcol3/7'
- old_rev = result['_rev']
+ # Test delete missing edge
+ with assert_raises(DocumentDeleteError) as err:
+ ecol.delete(bad_document_key, ignore_missing=False)
+ assert err.value.error_code == 1202
+ assert not ecol.delete(bad_document_key, ignore_missing=True)
+
+ # Test delete existing edge with sync set to True
+ assert ecol.delete(edge, sync=True, check_rev=False) is True
+ assert ecol[edge] is None
+ assert edge not in ecol
+ ecol.truncate()
- # TODO why is this succeeding?
- # Test replace edge to a invalid edge (from and to mixed up)
- result = ecol.replace(edge6)
- assert result['_id'] == 'ecol2/1'
- assert result['_key'] == '1'
- assert result['_old_rev'] == old_rev
- assert ecol['1']['_from'] == 'vcol3/6'
- assert ecol['1']['_to'] == 'vcol1/2'
- assert ecol['1']['_rev'] != old_rev
+def test_vertex_edges(db, bad_db):
+ graph_name = generate_graph_name()
+ vcol_name = generate_col_name()
+ ecol_name = generate_col_name()
-@pytest.mark.order19
-def test_delete_edge():
- ecol = graph.edge_collection('ecol2')
+ # Prepare test documents
+ anna = {'_id': '{}/anna'.format(vcol_name)}
+ dave = {'_id': '{}/dave'.format(vcol_name)}
+ josh = {'_id': '{}/josh'.format(vcol_name)}
+ mary = {'_id': '{}/mary'.format(vcol_name)}
+ tony = {'_id': '{}/tony'.format(vcol_name)}
+
+ # Create test graph, vertex and edge collections
+ school = db.create_graph(graph_name)
+
+ vcol = school.create_vertex_collection(vcol_name)
+ ecol = school.create_edge_definition(
+ edge_collection=ecol_name,
+ from_vertex_collections=[vcol_name],
+ to_vertex_collections=[vcol_name]
+ )
+ # Insert test vertices into the graph
+ vcol.insert(anna)
+ vcol.insert(dave)
+ vcol.insert(josh)
+ vcol.insert(mary)
+ vcol.insert(tony)
+
+ # Insert test edges into the graph
+ ecol.link(anna, dave)
+ ecol.link(josh, dave)
+ ecol.link(mary, dave)
+ ecol.link(tony, dave)
+ ecol.link(dave, anna)
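+ # Dave now has four inbound edges and one outbound; Anna has one of each.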
+
+ # Test edges with default direction (both)
+ result = ecol.edges(dave)
+ assert 'stats' in result
+ assert 'filtered' in result['stats']
+ assert 'scanned_index' in result['stats']
+ assert len(result['edges']) == 5
+
+ result = ecol.edges(anna)
+ assert len(result['edges']) == 2
+
+ # Test edges with direction set to "in"
+ result = ecol.edges(dave, direction='in')
+ assert len(result['edges']) == 4
+
+ result = ecol.edges(anna, direction='in')
+ assert len(result['edges']) == 1
+
+ # Test edges with direction set to "out"
+ result = ecol.edges(dave, direction='out')
+ assert len(result['edges']) == 1
+
+ result = ecol.edges(anna, direction='out')
+ assert len(result['edges']) == 1
+
+ bad_graph = bad_db.graph(graph_name)
+ with assert_raises(EdgeListError) as err:
+ bad_graph.edge_collection(ecol_name).edges(dave)
+ assert err.value.error_code == 1228
+
+
+def test_edge_management_via_graph(graph, ecol, fvcol, fvdocs, tvcol, tvdocs):
+ for vertex in fvdocs:
+ fvcol.insert(vertex)
+ for vertex in tvdocs:
+ tvcol.insert(vertex)
ecol.truncate()
- for edge in [edge1, edge2, edge4]:
- ecol.insert(edge)
-
- # Test delete existing edge
- assert ecol.delete(edge1) is True
- assert ecol['1'] is None
- assert '1' not in ecol
-
- # Test delete existing edge with sync
- assert ecol.delete(edge4, sync=True) is True
- assert ecol['3'] is None
- assert '3' not in ecol
-
- # Test delete edge with incorrect revision
- old_rev = ecol['2']['_rev']
- edge2['_rev'] = old_rev + '1'
- with pytest.raises(DocumentRevisionError):
- ecol.delete(edge2)
- assert '2' in ecol
-
- # Test delete edge from missing collection
- with pytest.raises(DocumentDeleteError):
- bad_ecol.delete(edge1, ignore_missing=False)
- # Test delete missing edge
- with pytest.raises(DocumentDeleteError):
- ecol.delete(edge3, ignore_missing=False)
+ # Get a random "from" vertex
+ from_vertex = fvcol.random()
+ assert graph.has_vertex(from_vertex)
- # Test delete missing edge while ignoring missing
- assert ecol.delete(edge3, ignore_missing=True) == False
+ # Get a random "to" vertex
+ to_vertex = tvcol.random()
+ assert graph.has_vertex(to_vertex)
+ # Test insert edge via graph object
+ result = graph.insert_edge(
+ ecol.name,
+ {'_from': from_vertex['_id'], '_to': to_vertex['_id']}
+ )
+ assert result['_key'] in ecol
+ assert graph.has_edge(result['_id'])
+ assert len(ecol) == 1
+
+ # Test link vertices via graph object
+ result = graph.link(ecol.name, from_vertex, to_vertex)
+ assert result['_key'] in ecol
+ assert len(ecol) == 2
+ edge_id = result['_id']
+
+ # Test get edge via graph object
+ assert graph.edge(edge_id)['_id'] == edge_id
+
+ # Test list edges via graph object
+ result = graph.edges(ecol.name, from_vertex, direction='out')
+ assert 'stats' in result
+ assert len(result['edges']) == 2
+
+ result = graph.edges(ecol.name, from_vertex, direction='in')
+ assert 'stats' in result
+ assert len(result['edges']) == 0
+
+ # Test update edge via graph object
+ result = graph.update_edge({'_id': edge_id, 'foo': 100})
+ assert result['_id'] == edge_id
+ assert ecol[edge_id]['foo'] == 100
+
+ # Test replace edge via graph object
+ result = graph.replace_edge({
+ '_id': edge_id,
+ '_from': from_vertex['_id'],
+ '_to': to_vertex['_id'],
+ 'bar': 200
+ })
+ assert result['_id'] == edge_id
+ assert 'foo' not in ecol[edge_id]
+ assert ecol[edge_id]['bar'] == 200
+
+ # Test delete edge via graph object
+ assert graph.delete_edge(edge_id) is True
+ assert edge_id not in ecol
+ assert len(ecol) == 1
-@pytest.mark.order20
-def test_traverse():
+
+def test_traverse(db):
# Create test graph, vertex and edge collections
- curriculum = db.create_graph('curriculum')
- professors = curriculum.create_vertex_collection('profs')
- classes = curriculum.create_vertex_collection('classes')
- teaches = curriculum.create_edge_definition(
- name='teaches',
- from_collections=['profs'],
- to_collections=['classes']
+ school = db.create_graph(generate_graph_name())
+ profs = school.create_vertex_collection(generate_col_name())
+ classes = school.create_vertex_collection(generate_col_name())
+ teaches = school.create_edge_definition(
+ edge_collection=generate_col_name(),
+ from_vertex_collections=[profs.name],
+ to_vertex_collections=[classes.name]
)
# Insert test vertices into the graph
- professors.insert({'_key': 'anna', 'name': 'Professor Anna'})
- professors.insert({'_key': 'andy', 'name': 'Professor Andy'})
+ profs.insert({'_key': 'anna', 'name': 'Professor Anna'})
+ profs.insert({'_key': 'andy', 'name': 'Professor Andy'})
classes.insert({'_key': 'CSC101', 'name': 'Introduction to CS'})
classes.insert({'_key': 'MAT223', 'name': 'Linear Algebra'})
classes.insert({'_key': 'STA201', 'name': 'Statistics'})
@@ -985,85 +1020,108 @@ def test_traverse():
classes.insert({'_key': 'MAT102', 'name': 'Calculus II'})
# Insert test edges into the graph
- teaches.insert({'_from': 'profs/anna', '_to': 'classes/CSC101'})
- teaches.insert({'_from': 'profs/anna', '_to': 'classes/STA201'})
- teaches.insert({'_from': 'profs/anna', '_to': 'classes/MAT223'})
- teaches.insert({'_from': 'profs/andy', '_to': 'classes/MAT101'})
- teaches.insert({'_from': 'profs/andy', '_to': 'classes/MAT102'})
- teaches.insert({'_from': 'profs/andy', '_to': 'classes/MAT223'})
+ teaches.insert({
+ '_from': '{}/anna'.format(profs.name),
+ '_to': '{}/CSC101'.format(classes.name)
+ })
+ teaches.insert({
+ '_from': '{}/anna'.format(profs.name),
+ '_to': '{}/STA201'.format(classes.name)
+ })
+ teaches.insert({
+ '_from': '{}/anna'.format(profs.name),
+ '_to': '{}/MAT223'.format(classes.name)
+ })
+ teaches.insert({
+ '_from': '{}/andy'.format(profs.name),
+ '_to': '{}/MAT101'.format(classes.name)
+ })
+ teaches.insert({
+ '_from': '{}/andy'.format(profs.name),
+ '_to': '{}/MAT102'.format(classes.name)
+ })
+ teaches.insert({
+ '_from': '{}/andy'.format(profs.name),
+ '_to': '{}/MAT223'.format(classes.name)
+ })
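+ # Anna teaches CSC101, STA201 and MAT223; Andy teaches MAT101, MAT102 and MAT223.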
# Traverse the graph with default settings
- result = curriculum.traverse(start_vertex='profs/anna')
- assert set(result) == {'paths', 'vertices'}
+ result = school.traverse('{}/anna'.format(profs.name))
+ visited = extract('_key', result['vertices'])
+ assert visited == ['CSC101', 'MAT223', 'STA201', 'anna']
+
for path in result['paths']:
for vertex in path['vertices']:
assert set(vertex) == {'_id', '_key', '_rev', 'name'}
for edge in path['edges']:
assert set(edge) == {'_id', '_key', '_rev', '_to', '_from'}
- visited_vertices = sorted([v['_key'] for v in result['vertices']])
- assert visited_vertices == ['CSC101', 'MAT223', 'STA201', 'anna']
- result = curriculum.traverse(start_vertex='profs/andy')
- visited_vertices = sorted([v['_key'] for v in result['vertices']])
- assert visited_vertices == ['MAT101', 'MAT102', 'MAT223', 'andy']
+
+ result = school.traverse('{}/andy'.format(profs.name))
+ visited = extract('_key', result['vertices'])
+ assert visited == ['MAT101', 'MAT102', 'MAT223', 'andy']
# Traverse the graph with an invalid start vertex
- with pytest.raises(GraphTraverseError):
- curriculum.traverse(start_vertex='invalid')
- with pytest.raises(GraphTraverseError):
- curriculum.traverse(start_vertex='students/hanna')
- with pytest.raises(GraphTraverseError):
- curriculum.traverse(start_vertex='profs/anderson')
+ with assert_raises(GraphTraverseError):
+ school.traverse('invalid')
+
+ with assert_raises(GraphTraverseError):
+ bad_col_name = generate_col_name()
+ school.traverse('{}/hanna'.format(bad_col_name))
+
+ with assert_raises(GraphTraverseError):
+ school.traverse('{}/anderson'.format(profs.name))
- # Travers the graph with max iteration of 0
+ # Traverse the graph with max iteration of 0
- with pytest.raises(GraphTraverseError):
- curriculum.traverse(start_vertex='profs/andy', max_iter=0)
+ with assert_raises(GraphTraverseError):
+ school.traverse('{}/andy'.format(profs.name), max_iter=0)
# Traverse the graph with max depth of 0
- result = curriculum.traverse(start_vertex='profs/andy', max_depth=0)
- visited_vertices = sorted([v['_key'] for v in result['vertices']])
- assert visited_vertices == ['andy']
- result = curriculum.traverse(start_vertex='profs/anna', max_depth=0)
- visited_vertices = sorted([v['_key'] for v in result['vertices']])
- assert visited_vertices == ['anna']
+ result = school.traverse('{}/andy'.format(profs.name), max_depth=0)
+ assert extract('_key', result['vertices']) == ['andy']
+
+ result = school.traverse('{}/anna'.format(profs.name), max_depth=0)
+ assert extract('_key', result['vertices']) == ['anna']
# Traverse the graph with min depth of 2
- result = curriculum.traverse(start_vertex='profs/andy', min_depth=2)
- visited_vertices = sorted([v['_key'] for v in result['vertices']])
- assert visited_vertices == []
- result = curriculum.traverse(start_vertex='profs/anna', min_depth=2)
- visited_vertices = sorted([v['_key'] for v in result['vertices']])
- assert visited_vertices == []
+ result = school.traverse('{}/andy'.format(profs.name), min_depth=2)
+ assert extract('_key', result['vertices']) == []
+
+ result = school.traverse('{}/anna'.format(profs.name), min_depth=2)
+ assert extract('_key', result['vertices']) == []
# Traverse the graph with DFS and BFS
- result = curriculum.traverse(
- start_vertex='profs/anna',
+ result = school.traverse(
+ {'_id': '{}/anna'.format(profs.name)},
strategy='dfs',
direction='any',
)
- dfs_vertices = [v['_key'] for v in result['vertices']]
- result = curriculum.traverse(
- start_vertex='profs/anna',
+ dfs_vertices = extract('_key', result['vertices'])
+
+ result = school.traverse(
+ {'_id': '{}/anna'.format(profs.name)},
strategy='bfs',
direction='any'
)
- bfs_vertices = [v['_key'] for v in result['vertices']]
- assert dfs_vertices != bfs_vertices # the order should be different
+ bfs_vertices = extract('_key', result['vertices'])
+
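+ # DFS and BFS should visit the same set of vertices, though possibly in a different order.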
assert sorted(dfs_vertices) == sorted(bfs_vertices)
# Traverse the graph with filter function
- result = curriculum.traverse(
- start_vertex='profs/andy',
+ result = school.traverse(
+ {'_id': '{}/andy'.format(profs.name)},
filter_func='if (vertex._key == "MAT101") {return "exclude";} return;'
)
- visited_vertices = sorted([v['_key'] for v in result['vertices']])
- assert visited_vertices == ['MAT102', 'MAT223', 'andy']
+ assert extract('_key', result['vertices']) == ['MAT102', 'MAT223', 'andy']
- # Traverse the graph with uniqueness (should be same as before)
- result = curriculum.traverse(
- start_vertex='profs/andy',
+ # Traverse the graph with global uniqueness (should be same as before)
+ result = school.traverse(
+ {'_id': '{}/andy'.format(profs.name)},
vertex_uniqueness='global',
edge_uniqueness='global',
filter_func='if (vertex._key == "MAT101") {return "exclude";} return;'
)
- visited_vertices = sorted([v['_key'] for v in result['vertices']])
- assert visited_vertices == ['MAT102', 'MAT223', 'andy']
+ assert extract('_key', result['vertices']) == ['MAT102', 'MAT223', 'andy']
+
+ with assert_raises(DocumentParseError) as err:
+ school.traverse({})
+ assert err.value.message == 'field "_id" required'
diff --git a/tests/test_index.py b/tests/test_index.py
index c096894a..5982e3c5 100644
--- a/tests/test_index.py
+++ b/tests/test_index.py
@@ -1,38 +1,15 @@
from __future__ import absolute_import, unicode_literals
-import pytest
-
-from arango import ArangoClient
from arango.exceptions import (
IndexListError,
IndexCreateError,
- IndexDeleteError
-)
-
-from .utils import (
- generate_db_name,
- generate_col_name
+ IndexDeleteError,
+ IndexLoadError
)
+from tests.helpers import assert_raises, extract
-arango_client = ArangoClient()
-db_name = generate_db_name()
-db = arango_client.create_database(db_name)
-col_name = generate_col_name()
-col = db.create_collection(col_name)
-bad_col_name = generate_col_name()
-bad_col = db.collection(bad_col_name)
-col.add_geo_index(['coordinates'])
-
-
-def teardown_module(*_):
- arango_client.delete_database(db_name, ignore_missing=True)
-
-def setup_function(*_):
- col.truncate()
-
-
-def test_list_indexes():
+def test_list_indexes(col, bad_col):
expected_index = {
'id': '0',
'selectivity': 1,
@@ -45,11 +22,12 @@ def test_list_indexes():
assert isinstance(indexes, list)
assert expected_index in indexes
- with pytest.raises(IndexListError):
+ with assert_raises(IndexListError) as err:
bad_col.indexes()
+ assert err.value.error_code == 1228
-def test_add_hash_index():
+def test_add_hash_index(col):
fields = ['attr1', 'attr2']
result = col.add_hash_index(
fields=fields,
@@ -73,7 +51,7 @@ def test_add_hash_index():
assert result in col.indexes()
-def test_add_skiplist_index():
+def test_add_skiplist_index(col):
fields = ['attr1', 'attr2']
result = col.add_skiplist_index(
fields=fields,
@@ -96,7 +74,7 @@ def test_add_skiplist_index():
assert result in col.indexes()
-def test_add_geo_index():
+def test_add_geo_index(col):
# Test add geo index with one attribute
result = col.add_geo_index(
fields=['attr1'],
@@ -138,15 +116,12 @@ def test_add_geo_index():
assert result in col.indexes()
# Test add geo index with more than two attributes (should fail)
- with pytest.raises(IndexCreateError):
+ with assert_raises(IndexCreateError) as err:
col.add_geo_index(fields=['attr1', 'attr2', 'attr3'])
+ assert err.value.error_code == 10
-def test_add_fulltext_index():
- # Test add fulltext index with two attributes (should fail)
- with pytest.raises(IndexCreateError):
- col.add_fulltext_index(fields=['attr1', 'attr2'])
-
+def test_add_fulltext_index(col):
# Test add fulltext index with one attribute
result = col.add_fulltext_index(
fields=['attr1'],
@@ -165,9 +140,14 @@ def test_add_fulltext_index():
result.pop('new', None)
assert result in col.indexes()
+ # Test add fulltext index with two attributes (should fail)
+ with assert_raises(IndexCreateError) as err:
+ col.add_fulltext_index(fields=['attr1', 'attr2'])
+ assert err.value.error_code == 10
-def test_add_persistent_index():
- # Test add fulltext index with one attributes
+
+def test_add_persistent_index(col):
+ # Test add persistent index with two attributes
result = col.add_persistent_index(
fields=['attr1', 'attr2'],
unique=True,
@@ -186,30 +166,42 @@ def test_add_persistent_index():
assert result in col.indexes()
-def test_delete_index():
- old_indexes = set(index['id'] for index in col.indexes())
+def test_delete_index(col, bad_col):
+ old_indexes = set(extract('id', col.indexes()))
col.add_hash_index(['attr3', 'attr4'], unique=True)
col.add_skiplist_index(['attr3', 'attr4'], unique=True)
col.add_fulltext_index(fields=['attr3'], min_length=10)
- new_indexes = set(index['id'] for index in col.indexes())
+ new_indexes = set(extract('id', col.indexes()))
assert new_indexes.issuperset(old_indexes)
indexes_to_delete = new_indexes - old_indexes
for index_id in indexes_to_delete:
assert col.delete_index(index_id) is True
- new_indexes = set(index['id'] for index in col.indexes())
+ new_indexes = set(extract('id', col.indexes()))
assert new_indexes == old_indexes
# Test delete missing indexes
for index_id in indexes_to_delete:
assert col.delete_index(index_id, ignore_missing=True) is False
for index_id in indexes_to_delete:
- with pytest.raises(IndexDeleteError):
+ with assert_raises(IndexDeleteError) as err:
col.delete_index(index_id, ignore_missing=False)
+ assert err.value.error_code == 1212
- # Test delete indexes in missing collection
+ # Test delete indexes with bad collection
for index_id in indexes_to_delete:
- with pytest.raises(IndexDeleteError):
+ with assert_raises(IndexDeleteError) as err:
bad_col.delete_index(index_id, ignore_missing=False)
+ assert err.value.error_code == 1228
+
+
+def test_load_indexes(col, bad_col):
+ # Test load indexes
+ assert col.load_indexes() is True
+
+ # Test load indexes with bad collection
+ with assert_raises(IndexLoadError) as err:
+ bad_col.load_indexes()
+ assert err.value.error_code == 1228
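
For reference, a minimal sketch of the index management calls covered above (the "students" collection is hypothetical):

    col = db.collection('students')  # hypothetical collection

    # Create indexes of various types.
    new_index = col.add_hash_index(fields=['attr1', 'attr2'], unique=True)
    col.add_skiplist_index(fields=['attr1', 'attr2'])
    col.add_geo_index(fields=['coordinates'])
    col.add_fulltext_index(fields=['attr1'], min_length=10)
    col.add_persistent_index(fields=['attr1', 'attr2'], unique=True)

    # List, load and delete indexes.
    for index in col.indexes():
        print(index['id'], index['type'], index['fields'])
    col.load_indexes()
    col.delete_index(new_index['id'], ignore_missing=True)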
diff --git a/tests/test_permission.py b/tests/test_permission.py
new file mode 100644
index 00000000..4ede760a
--- /dev/null
+++ b/tests/test_permission.py
@@ -0,0 +1,140 @@
+from __future__ import absolute_import, unicode_literals
+
+from arango.exceptions import (
+ CollectionCreateError,
+ CollectionListError,
+ CollectionPropertiesError,
+ DocumentInsertError,
+ PermissionResetError,
+ PermissionGetError,
+ PermissionUpdateError,
+ PermissionListError
+)
+from tests.helpers import (
+ assert_raises,
+ extract,
+ generate_col_name,
+ generate_db_name,
+ generate_string,
+ generate_username
+)
+
+
+def test_permission_management(client, sys_db, bad_db):
+ username = generate_username()
+ password = generate_string()
+ db_name = generate_db_name()
+ col_name_1 = generate_col_name()
+ col_name_2 = generate_col_name()
+
+ sys_db.create_database(
+ name=db_name,
+ users=[{
+ 'username': username,
+ 'password': password,
+ 'active': True
+ }]
+ )
+ db = client.db(db_name, username, password)
+ assert isinstance(sys_db.permissions(username), dict)
+
+ # Test list permissions with bad database
+ with assert_raises(PermissionListError) as err:
+ bad_db.permissions(username)
+ assert err.value.error_code == 1228
+
+ # Test get permission with bad database
+ with assert_raises(PermissionGetError) as err:
+ bad_db.permission(username, db_name)
+ assert err.value.error_code == 1228
+
+ # The user should have read and write permissions
+ assert sys_db.permission(username, db_name) == 'rw'
+ assert sys_db.permission(username, db_name, col_name_1) == 'rw'
+ assert db.create_collection(col_name_1) is not None
+ assert col_name_1 in extract('name', db.collections())
+
+ # Test update permission (database level) to none and verify access
+ assert sys_db.update_permission(username, 'none', db_name) is True
+ assert sys_db.permission(username, db_name) == 'none'
+ with assert_raises(CollectionCreateError) as err:
+ db.create_collection(col_name_2)
+ assert err.value.http_code == 401
+ with assert_raises(CollectionListError) as err:
+ db.collections()
+ assert err.value.http_code == 401
+
+ # Test update permission (database level) with bad database
+ with assert_raises(PermissionUpdateError):
+ bad_db.update_permission(username, 'ro', db_name)
+ assert sys_db.permission(username, db_name) == 'none'
+
+ # Test update permission (database level) to read only and verify access
+ assert sys_db.update_permission(username, 'ro', db_name) is True
+ assert sys_db.permission(username, db_name) == 'ro'
+ with assert_raises(CollectionCreateError) as err:
+ db.create_collection(col_name_2)
+ assert err.value.http_code == 403
+ assert col_name_1 in extract('name', db.collections())
+ assert col_name_2 not in extract('name', db.collections())
+
+ # Test reset permission (database level) with bad database
+ with assert_raises(PermissionResetError) as err:
+ bad_db.reset_permission(username, db_name)
+ assert err.value.error_code == 1228
+ assert sys_db.permission(username, db_name) == 'ro'
+
+ # Test reset permission (database level) and verify access
+ assert sys_db.reset_permission(username, db_name) is True
+ assert sys_db.permission(username, db_name) == 'none'
+ with assert_raises(CollectionCreateError) as err:
+ db.create_collection(col_name_1)
+ assert err.value.http_code == 401
+ with assert_raises(CollectionListError) as err:
+ db.collections()
+ assert err.value.http_code == 401
+
+ # Test update permission (database level) and verify access
+ assert sys_db.update_permission(username, 'rw', db_name) is True
+ assert sys_db.permission(username, db_name, col_name_2) == 'rw'
+ assert db.create_collection(col_name_2) is not None
+ assert col_name_2 in extract('name', db.collections())
+
+ col_1 = db.collection(col_name_1)
+ col_2 = db.collection(col_name_2)
+
+ # Verify that user has read and write access to both collections
+ assert isinstance(col_1.properties(), dict)
+ assert isinstance(col_1.insert({}), dict)
+ assert isinstance(col_2.properties(), dict)
+ assert isinstance(col_2.insert({}), dict)
+
+ # Test update permission (collection level) to read only and verify access
+ assert sys_db.update_permission(username, 'ro', db_name, col_name_1)
+ assert sys_db.permission(username, db_name, col_name_1) == 'ro'
+ assert isinstance(col_1.properties(), dict)
+ with assert_raises(DocumentInsertError) as err:
+ col_1.insert({})
+ assert err.value.http_code == 403
+ assert isinstance(col_2.properties(), dict)
+ assert isinstance(col_2.insert({}), dict)
+
+ # Test update permission (collection level) to none and verify access
+ assert sys_db.update_permission(username, 'none', db_name, col_name_1)
+ assert sys_db.permission(username, db_name, col_name_1) == 'none'
+ with assert_raises(CollectionPropertiesError) as err:
+ col_1.properties()
+ assert err.value.http_code == 403
+ with assert_raises(DocumentInsertError) as err:
+ col_1.insert({})
+ assert err.value.http_code == 403
+ assert isinstance(col_2.properties(), dict)
+ assert isinstance(col_2.insert({}), dict)
+
+ # Test reset permission (collection level)
+ assert sys_db.reset_permission(username, db_name, col_name_1) is True
+ assert sys_db.permission(username, db_name, col_name_1) == 'rw'
+ assert isinstance(col_1.properties(), dict)
+ assert isinstance(col_1.insert({}), dict)
+ assert isinstance(col_2.properties(), dict)
+ assert isinstance(col_2.insert({}), dict)
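
For reference, a minimal sketch of the permission API driven by the test above (the username, database and collection names are hypothetical; valid access levels are 'rw', 'ro' and 'none'):

    sys_db = client.db('_system', username='root', password='passwd')

    sys_db.permissions('bob')                      # all permissions of a user
    sys_db.permission('bob', 'test')               # database-level access
    sys_db.permission('bob', 'test', 'students')   # collection-level access
    sys_db.update_permission('bob', 'ro', 'test')  # set database-level access
    sys_db.reset_permission('bob', 'test')         # fall back to the default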
diff --git a/tests/test_pregel.py b/tests/test_pregel.py
index becfa209..8c876136 100644
--- a/tests/test_pregel.py
+++ b/tests/test_pregel.py
@@ -1,88 +1,60 @@
from __future__ import absolute_import, unicode_literals
+from six import string_types
-import pytest
-
-from arango import ArangoClient
from arango.exceptions import (
PregelJobCreateError,
PregelJobGetError,
PregelJobDeleteError
)
-
-from .utils import (
- generate_db_name,
- generate_col_name,
- generate_graph_name,
-)
-
-arango_client = ArangoClient()
-db_name = generate_db_name()
-db = arango_client.create_database(db_name)
-graph_name = generate_graph_name()
-graph = db.create_graph(graph_name)
-from_col_name = generate_col_name()
-to_col_name = generate_col_name()
-edge_col_name = generate_col_name()
-graph.create_vertex_collection(from_col_name)
-graph.create_vertex_collection(to_col_name)
-graph.create_edge_definition(
- edge_col_name, [from_col_name], [to_col_name]
+from tests.helpers import (
+ assert_raises,
+ generate_string
)
-def teardown_module(*_):
- arango_client.delete_database(db_name, ignore_missing=True)
-
-
-@pytest.mark.order1
-def test_start_pregel_job():
- # Test start_pregel_job with page rank algorithm (happy path)
- job_id = db.create_pregel_job('pagerank', graph_name)
+def test_pregel_attributes(db, username):
+ assert db.pregel.context in ['default', 'async', 'batch', 'transaction']
+ assert db.pregel.username == username
+ assert db.pregel.db_name == db.name
+ assert repr(db.pregel) == '<Pregel in {}>'.format(db.name)
+
+
+def test_pregel_management(db, graph):
+ # Test create pregel job
+ job_id = db.pregel.create_job(
+ graph.name,
+ 'pagerank',
+ store=False,
+ max_gss=100,
+ thread_count=1,
+ async_mode=False,
+ result_field='result',
+ algorithm_params={'threshold': 0.000001}
+ )
assert isinstance(job_id, int)
- # Test start_pregel_job with unsupported algorithm
- with pytest.raises(PregelJobCreateError):
- db.create_pregel_job('unsupported_algorithm', graph_name)
-
+ # Test create pregel job with unsupported algorithm
+ with assert_raises(PregelJobCreateError) as err:
+ db.pregel.create_job(graph.name, 'invalid')
+ assert err.value.error_code == 10
-@pytest.mark.order2
-def test_get_pregel_job():
- # Create a test Pregel job
- job_id = db.create_pregel_job('pagerank', graph_name)
-
- # Test pregel_job with existing job ID (happy path)
- job = db.pregel_job(job_id)
+ # Test get existing pregel job
+ job = db.pregel.job(job_id)
+ assert isinstance(job['state'], string_types)
assert isinstance(job['aggregators'], dict)
assert isinstance(job['gss'], int)
assert isinstance(job['received_count'], int)
assert isinstance(job['send_count'], int)
assert isinstance(job['total_runtime'], float)
- assert job['state'] == 'running'
- assert 'edge_count' in job
- assert 'vertex_count' in job
-
- # Test pregel_job with an invalid job ID
- with pytest.raises(PregelJobGetError):
- db.pregel_job(-1)
-
-
-@pytest.mark.order3
-def test_delete_pregel_job():
- # Create a test Pregel job
- job_id = db.create_pregel_job('pagerank', graph_name)
-
- # Get the newly created job
- job = db.pregel_job(job_id)
- assert job['state'] == 'running'
-
- # Test delete_pregel_job with existing job ID (happy path)
- assert db.delete_pregel_job(job_id) == True
- # The fetch for the same job should now fail
- with pytest.raises(PregelJobGetError):
- db.pregel_job(job_id)
+ # Test delete existing pregel job
+ assert db.pregel.delete_job(job_id) is True
+ with assert_raises(PregelJobGetError) as err:
+ db.pregel.job(job_id)
+ assert err.value.error_code == 10
- # Test delete_pregel_job with an invalid job ID
- with pytest.raises(PregelJobDeleteError):
- db.delete_pregel_job(-1)
+ # Test delete missing pregel job
+ with assert_raises(PregelJobDeleteError) as err:
+ db.pregel.delete_job(generate_string())
+ assert err.value.error_code == 10
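
For reference, a minimal sketch of the Pregel job lifecycle tested above (the "school" graph is hypothetical):

    pregel = db.pregel

    job_id = pregel.create_job(
        graph='school',
        algorithm='pagerank',
        store=False,
        max_gss=100,
        result_field='result',
        algorithm_params={'threshold': 0.000001},
    )
    job = pregel.job(job_id)     # returns state, gss, runtime, counters etc.
    pregel.delete_job(job_id)    # cancel and remove the job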
diff --git a/tests/test_request.py b/tests/test_request.py
new file mode 100644
index 00000000..6d973864
--- /dev/null
+++ b/tests/test_request.py
@@ -0,0 +1,120 @@
+from __future__ import absolute_import, unicode_literals
+
+from arango.request import Request
+
+
+def test_request_no_data():
+ request = Request(
+ method='post',
+ endpoint='/_api/test',
+ params={'bool': True},
+ headers={'foo': 'bar'}
+ )
+ assert str(request) == '\r\n'.join([
+ 'post /_api/test?bool=1 HTTP/1.1',
+ 'charset: utf-8',
+ 'content-type: application/json',
+ 'foo: bar',
+ ])
+ assert request.method == 'post'
+ assert request.endpoint == '/_api/test'
+ assert request.params == {'bool': 1}
+ assert request.headers == {
+ 'charset': 'utf-8',
+ 'content-type': 'application/json',
+ 'foo': 'bar',
+ }
+ assert request.data is None
+ assert request.command is None
+ assert request.read is None
+ assert request.write is None
+
+
+def test_request_string_data():
+ request = Request(
+ method='post',
+ endpoint='/_api/test',
+ params={'bool': True},
+ headers={'foo': 'bar'},
+ data='test'
+ )
+ assert str(request) == '\r\n'.join([
+ 'post /_api/test?bool=1 HTTP/1.1',
+ 'charset: utf-8',
+ 'content-type: application/json',
+ 'foo: bar',
+ '\r\ntest',
+ ])
+ assert request.method == 'post'
+ assert request.endpoint == '/_api/test'
+ assert request.params == {'bool': 1}
+ assert request.headers == {
+ 'charset': 'utf-8',
+ 'content-type': 'application/json',
+ 'foo': 'bar',
+ }
+ assert request.data == 'test'
+ assert request.command is None
+ assert request.read is None
+ assert request.write is None
+
+
+def test_request_json_data():
+ request = Request(
+ method='post',
+ endpoint='/_api/test',
+ params={'bool': True},
+ headers={'foo': 'bar'},
+ data={'baz': 'qux'}
+ )
+ assert str(request) == '\r\n'.join([
+ 'post /_api/test?bool=1 HTTP/1.1',
+ 'charset: utf-8',
+ 'content-type: application/json',
+ 'foo: bar',
+ '\r\n{"baz": "qux"}',
+ ])
+ assert request.method == 'post'
+ assert request.endpoint == '/_api/test'
+ assert request.params == {'bool': 1}
+ assert request.headers == {
+ 'charset': 'utf-8',
+ 'content-type': 'application/json',
+ 'foo': 'bar',
+ }
+ assert request.data == '{"baz": "qux"}'
+ assert request.command is None
+ assert request.read is None
+ assert request.write is None
+
+
+def test_request_transaction_data():
+ request = Request(
+ method='post',
+ endpoint='/_api/test',
+ params={'bool': True},
+ headers={'foo': 'bar'},
+ data={'baz': 'qux'},
+ command='return 1',
+ read='one',
+ write='two',
+ )
+ assert str(request) == '\r\n'.join([
+ 'post /_api/test?bool=1 HTTP/1.1',
+ 'charset: utf-8',
+ 'content-type: application/json',
+ 'foo: bar',
+ '\r\n{"baz": "qux"}',
+ ])
+ assert request.method == 'post'
+ assert request.endpoint == '/_api/test'
+ assert request.params == {'bool': 1}
+ assert request.headers == {
+ 'charset': 'utf-8',
+ 'content-type': 'application/json',
+ 'foo': 'bar',
+ }
+ assert request.data == '{"baz": "qux"}'
+ assert request.command == 'return 1'
+ assert request.read == 'one'
+ assert request.write == 'two'
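
For reference, a minimal sketch of how a Request is built and rendered (the endpoint and payload are hypothetical; note that boolean params are serialized to 0/1, as the tests above assert):

    from arango.request import Request

    request = Request(
        method='post',
        endpoint='/_api/document/students',
        params={'returnNew': True},
        data={'_key': 'anna', 'age': 20},
    )
    print(str(request))  # rendered as the raw HTTP-style message shown above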
diff --git a/tests/test_response.py b/tests/test_response.py
new file mode 100644
index 00000000..1d2d9b78
--- /dev/null
+++ b/tests/test_response.py
@@ -0,0 +1,62 @@
+from __future__ import absolute_import, unicode_literals
+
+from requests.structures import CaseInsensitiveDict
+
+from arango.response import Response
+
+
+def test_response():
+ response = Response(
+ method='get',
+ url='test_url',
+ headers=CaseInsensitiveDict({'foo': 'bar'}),
+ status_text='baz',
+ status_code=200,
+ raw_body='true',
+ )
+ assert response.method == 'get'
+ assert response.url == 'test_url'
+ assert response.headers == {'foo': 'bar'}
+ assert response.status_code == 200
+ assert response.status_text == 'baz'
+ assert response.body is True
+ assert response.raw_body == 'true'
+ assert response.error_code is None
+ assert response.error_message is None
+
+ test_body = '{"errorNum": 1, "errorMessage": "qux"}'
+ response = Response(
+ method='get',
+ url='test_url',
+ headers=CaseInsensitiveDict({'foo': 'bar'}),
+ status_text='baz',
+ status_code=200,
+ raw_body=test_body,
+ )
+ assert response.method == 'get'
+ assert response.url == 'test_url'
+ assert response.headers == {'foo': 'bar'}
+ assert response.status_code == 200
+ assert response.status_text == 'baz'
+ assert response.body == {'errorNum': 1, 'errorMessage': 'qux'}
+ assert response.raw_body == test_body
+ assert response.error_code == 1
+ assert response.error_message == 'qux'
+
+ response = Response(
+ method='get',
+ url='test_url',
+ headers=CaseInsensitiveDict({'foo': 'bar'}),
+ status_text='baz',
+ status_code=200,
+ raw_body='invalid',
+ )
+ assert response.method == 'get'
+ assert response.url == 'test_url'
+ assert response.headers == {'foo': 'bar'}
+ assert response.status_code == 200
+ assert response.status_text == 'baz'
+ assert response.body == 'invalid'
+ assert response.raw_body == 'invalid'
+ assert response.error_code is None
+ assert response.error_message is None
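
A Response is what an HTTP client implementation hands back to the driver. A sketch of wrapping a requests response into one (the helper is hypothetical):

    import requests

    from arango.response import Response

    def to_arango_response(method, url, raw):
        # raw is a requests.Response; the body is parsed by Response itself.
        return Response(
            method=method,
            url=url,
            headers=raw.headers,
            status_code=raw.status_code,
            status_text=raw.reason,
            raw_body=raw.text,
        )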
diff --git a/tests/test_task.py b/tests/test_task.py
index 12b2d0b4..5d5a9847 100644
--- a/tests/test_task.py
+++ b/tests/test_task.py
@@ -1,71 +1,40 @@
from __future__ import absolute_import, unicode_literals
-import pytest
from six import string_types
-from arango import ArangoClient
-from arango.exceptions import *
-
-from .utils import (
- generate_db_name,
- generate_task_name,
- generate_task_id
+from arango.exceptions import (
+ TaskCreateError,
+ TaskDeleteError,
+ TaskGetError,
+ TaskListError
+)
+from tests.helpers import (
+ assert_raises,
+ extract,
+ generate_task_id,
+ generate_task_name,
)
-arango_client = ArangoClient()
-db_name = generate_db_name()
-db = arango_client.create_database(db_name)
-bad_db_name = generate_db_name()
-bad_db = arango_client.db(bad_db_name)
-test_cmd = "require('@arangodb').print(params);"
-
-
-def teardown_module(*_):
- # Clean up any test tasks that were created
- for task in db.tasks():
- if task['name'].startswith('test_task_'):
- db.delete_task(task['id'])
- arango_client.delete_database(db_name, ignore_missing=True)
-
-
-def test_list_tasks():
- for task in db.tasks():
- assert task['database'] == '_system'
- assert task['type'] in {'periodic', 'timed'}
- assert isinstance(task['id'], string_types)
- assert isinstance(task['name'], string_types)
- assert isinstance(task['created'], float)
- assert isinstance(task['command'], string_types)
-
- with pytest.raises(TaskListError):
- bad_db.tasks()
-
-
-def test_get_task():
- # Test get existing tasks
- for task in db.tasks():
- assert db.task(task['id']) == task
-
- # Test get missing task
- with pytest.raises(TaskGetError):
- db.task(generate_task_id())
+def test_task_management(db, bad_db):
+ test_command = 'require("@arangodb").print(params);'
-def test_create_task():
# Test create task with random ID
task_name = generate_task_name()
new_task = db.create_task(
name=task_name,
- command=test_cmd,
+ command=test_command,
params={'foo': 1, 'bar': 2},
offset=1,
)
assert new_task['name'] == task_name
assert 'print(params)' in new_task['command']
assert new_task['type'] == 'timed'
- assert new_task['database'] == db_name
+ assert new_task['database'] == db.name
assert isinstance(new_task['created'], float)
assert isinstance(new_task['id'], string_types)
+
+ # Test get existing task
assert db.task(new_task['id']) == new_task
# Test create task with specific ID
@@ -73,7 +42,7 @@ def test_create_task():
task_id = generate_task_id()
new_task = db.create_task(
name=task_name,
- command=test_cmd,
+ command=test_command,
params={'foo': 1, 'bar': 2},
offset=1,
period=10,
@@ -83,40 +52,49 @@ def test_create_task():
assert new_task['id'] == task_id
assert 'print(params)' in new_task['command']
assert new_task['type'] == 'periodic'
- assert new_task['database'] == db_name
+ assert new_task['database'] == db.name
assert isinstance(new_task['created'], float)
assert db.task(new_task['id']) == new_task
# Test create duplicate task
- with pytest.raises(TaskCreateError):
+ with assert_raises(TaskCreateError) as err:
db.create_task(
name=task_name,
- command=test_cmd,
+ command=test_command,
params={'foo': 1, 'bar': 2},
task_id=task_id
)
+ assert err.value.error_code == 1851
+ # Test list tasks
+ for task in db.tasks():
+ assert task['database'] in db.databases()
+ assert task['type'] in {'periodic', 'timed'}
+ assert isinstance(task['id'], string_types)
+ assert isinstance(task['name'], string_types)
+ assert isinstance(task['created'], float)
+ assert isinstance(task['command'], string_types)
-def test_delete_task():
- # Set up a test task to delete
- task_name = generate_task_name()
- task_id = generate_task_id()
- db.create_task(
- name=task_name,
- command=test_cmd,
- params={'foo': 1, 'bar': 2},
- task_id=task_id,
- period=10
- )
+ # Test list tasks with bad database
+ with assert_raises(TaskListError) as err:
+ bad_db.tasks()
+ assert err.value.error_code == 1228
+
+ # Test get missing task
+ with assert_raises(TaskGetError) as err:
+ db.task(generate_task_id())
+ assert err.value.error_code == 1852
# Test delete existing task
+ assert task_id in extract('id', db.tasks())
assert db.delete_task(task_id) is True
- with pytest.raises(TaskGetError):
+ assert task_id not in extract('id', db.tasks())
+ with assert_raises(TaskGetError) as err:
db.task(task_id)
+ assert err.value.error_code == 1852
- # Test delete missing task without ignore_missing
- with pytest.raises(TaskDeleteError):
- db.delete_task(task_id, ignore_missing=False)
-
- # Test delete missing task with ignore_missing
+ # Test delete missing task
+ with assert_raises(TaskDeleteError) as err:
+ db.delete_task(generate_task_id(), ignore_missing=False)
+ assert err.value.error_code == 1852
assert db.delete_task(task_id, ignore_missing=True) is False
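
For reference, a minimal sketch of the server task API exercised above (the task name and params are hypothetical):

    new_task = db.create_task(
        name='print_params',
        command='require("@arangodb").print(params);',
        params={'foo': 1, 'bar': 2},
        offset=1,    # delay in seconds before the first run
        period=10,   # repeat every 10 seconds ('periodic' type)
    )
    db.tasks()                     # list all server tasks
    db.task(new_task['id'])        # fetch a single task
    db.delete_task(new_task['id'], ignore_missing=True)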
diff --git a/tests/test_transaction.py b/tests/test_transaction.py
index ea75ac32..eb88ec4d 100644
--- a/tests/test_transaction.py
+++ b/tests/test_transaction.py
@@ -1,426 +1,203 @@
from __future__ import absolute_import, unicode_literals
import pytest
+from six import string_types
-from arango import ArangoClient
-from arango.collections import Collection
-from arango.exceptions import TransactionError
-
-from .utils import (
- generate_db_name,
- generate_col_name,
+from arango.database import TransactionDatabase
+from arango.exceptions import (
+ TransactionStateError,
+ TransactionExecuteError,
+ TransactionJobResultError
)
+from arango.job import TransactionJob
+from tests.helpers import clean_doc, extract, generate_string
+
+
+# noinspection PyUnresolvedReferences
+def test_transaction_wrapper_attributes(db, col, username):
+ txn_db = db.begin_transaction(timeout=100, sync=True)
+ assert txn_db._executor._sync is True
+ assert txn_db._executor._timeout == 100
+ assert isinstance(txn_db, TransactionDatabase)
+ assert txn_db.username == username
+ assert txn_db.context == 'transaction'
+ assert txn_db.db_name == db.name
+ assert txn_db.name == db.name
+ assert repr(txn_db) == '<TransactionDatabase {}>'.format(db.name)
+
+ txn_col = txn_db.collection(col.name)
+ assert txn_col.username == username
+ assert txn_col.context == 'transaction'
+ assert txn_col.db_name == db.name
+ assert txn_col.name == col.name
+
+ txn_aql = txn_db.aql
+ assert txn_aql.username == username
+ assert txn_aql.context == 'transaction'
+ assert txn_aql.db_name == db.name
+
+ job = txn_col.get(generate_string())
+ assert isinstance(job, TransactionJob)
+ assert isinstance(job.id, string_types)
+ assert repr(job) == '<TransactionJob {}>'.format(job.id)
+
+
+def test_transaction_execute_without_result(db, col, docs):
+ with db.begin_transaction(return_result=False) as txn_db:
+ txn_col = txn_db.collection(col.name)
+
+ # Ensure that no jobs are returned
+ assert txn_col.insert(docs[0]) is None
+ assert txn_col.delete(docs[0]) is None
+ assert txn_col.insert(docs[1]) is None
+ assert txn_col.delete(docs[1]) is None
+ assert txn_col.insert(docs[2]) is None
+ assert txn_col.get(docs[2]) is None
+ assert txn_db.queued_jobs() is None
+
+ # Ensure that the operations went through
+ assert txn_db.queued_jobs() is None
+ assert extract('_key', col.all()) == [docs[2]['_key']]
+
+
+def test_transaction_execute_with_result(db, col, docs):
+ with db.begin_transaction(return_result=True) as txn_db:
+ txn_col = txn_db.collection(col.name)
+ job1 = txn_col.insert(docs[0])
+ job2 = txn_col.insert(docs[1])
+ job3 = txn_col.get(docs[1])
+ jobs = txn_db.queued_jobs()
+ assert jobs == [job1, job2, job3]
+ assert all(job.status() == 'pending' for job in jobs)
+
+ assert txn_db.queued_jobs() == [job1, job2, job3]
+ assert all(job.status() == 'done' for job in txn_db.queued_jobs())
+ assert extract('_key', col.all()) == extract('_key', docs[:2])
+
+ # Test successful results
+ assert job1.result()['_key'] == docs[0]['_key']
+ assert job2.result()['_key'] == docs[1]['_key']
+ assert job3.result()['_key'] == docs[1]['_key']
+
+
+def test_transaction_execute_error_in_result(db, col, docs):
+ txn_db = db.begin_transaction(timeout=100, sync=True)
+ txn_col = txn_db.collection(col.name)
+ job1 = txn_col.insert(docs[0])
+ job2 = txn_col.insert(docs[1])
+ job3 = txn_col.insert(docs[1]) # duplicate
+
+ with pytest.raises(TransactionExecuteError) as err:
+ txn_db.commit()
+ assert err.value.error_code == 1210
+
+ jobs = [job1, job2, job3]
+ assert txn_db.queued_jobs() == jobs
+ assert all(job.status() == 'pending' for job in jobs)
+
+
+def test_transaction_empty_commit(db):
+ txn_db = db.begin_transaction(return_result=True)
+ assert list(txn_db.commit()) == []
+
+ txn_db = db.begin_transaction(return_result=False)
+ assert txn_db.commit() is None
+
+
+def test_transaction_double_commit(db, col, docs):
+ txn_db = db.begin_transaction()
+ job = txn_db.collection(col.name).insert(docs[0])
+
+ # Test first commit
+ assert txn_db.commit() == [job]
+ assert job.status() == 'done'
+ assert len(col) == 1
+ assert clean_doc(col.random()) == docs[0]
+
+ # Test second commit which should fail
+ with pytest.raises(TransactionStateError) as err:
+ txn_db.commit()
+ assert 'already committed' in str(err.value)
+ assert job.status() == 'done'
+ assert len(col) == 1
+ assert clean_doc(col.random()) == docs[0]
+
+
+def test_transaction_action_after_commit(db, col):
+ with db.begin_transaction() as txn_db:
+ txn_db.collection(col.name).insert({})
+
+ # Test insert after the transaction has been committed
+ with pytest.raises(TransactionStateError) as err:
+ txn_db.collection(col.name).insert({})
+ assert 'already committed' in str(err.value)
+ assert len(col) == 1
+
+
+def test_transaction_method_not_allowed(db):
+ with pytest.raises(TransactionStateError) as err:
+ txn_db = db.begin_transaction()
+ txn_db.aql.functions()
+ assert str(err.value) == 'action not allowed in transaction'
-arango_client = ArangoClient()
-db_name = generate_db_name()
-db = arango_client.create_database(db_name)
-col_name = generate_col_name()
-col = db.create_collection(col_name)
-
-doc1 = {'_key': '1', 'data': {'val': 100}}
-doc2 = {'_key': '2', 'data': {'val': 200}}
-doc3 = {'_key': '3', 'data': {'val': 300}}
-doc4 = {'_key': '4', 'data': {'val': 400}}
-doc5 = {'_key': '5', 'data': {'val': 500}}
-test_docs = [doc1, doc2, doc3, doc4, doc5]
-test_doc_keys = [d['_key'] for d in test_docs]
-
-
-def get_test_docs():
- return [t.copy() for t in test_docs]
-
-
-def teardown_module(*_):
- arango_client.delete_database(db_name, ignore_missing=True)
+ with pytest.raises(TransactionStateError) as err:
+ with db.begin_transaction() as txn_db:
+ txn_db.aql.functions()
+ assert str(err.value) == 'action not allowed in transaction'
+
+
+def test_transaction_execute_error(bad_db, col, docs):
+ txn_db = bad_db.begin_transaction(return_result=True)
+ job = txn_db.collection(col.name).insert_many(docs)
+
+ # Test transaction execute with bad database
+ with pytest.raises(TransactionExecuteError):
+ txn_db.commit()
+ assert len(col) == 0
+ assert job.status() == 'pending'
-def setup_function(*_):
- col.truncate()
+def test_transaction_job_result_not_ready(db, col, docs):
+ txn_db = db.begin_transaction(return_result=True)
+ job = txn_db.collection(col.name).insert_many(docs)
+ # Test get job result before commit
+ with pytest.raises(TransactionJobResultError) as err:
+ job.result()
+ assert str(err.value) == 'result not available yet'
-def test_init():
- txn = db.transaction(
- read=col_name,
- write=col_name,
- sync=True,
- timeout=1000,
- )
- assert txn.type == 'transaction'
- assert 'ArangoDB transaction {}'.format(txn.id) in repr(txn)
- assert isinstance(txn.collection('test'), Collection)
+ # Test commit to make sure it still works after the errors
+ assert list(txn_db.commit()) == [job]
+ assert len(job.result()) == len(docs)
+ assert extract('_key', col.all()) == extract('_key', docs)
-def test_execute_without_params():
- txn = db.transaction(write=col_name)
- result = txn.execute(
+def test_transaction_execute_raw(db, col, docs):
+ # Test execute raw transaction
+ doc = docs[0]
+ key = doc['_key']
+ result = db.execute_transaction(
command='''
- function () {{
+ function (params) {{
var db = require('internal').db;
- db.{col}.save({{ '_key': '1', 'val': 1}});
- db.{col}.save({{ '_key': '2', 'val': 2}});
- return 'success without params!';
+ db.{col}.save({{'_key': params.key, 'val': 1}});
+ return true;
}}
- '''.format(col=col_name),
+ '''.format(col=col.name),
+ params={'key': key},
+ write=[col.name],
+ read=[col.name],
sync=False,
- timeout=1000
- )
- assert result == 'success without params!'
- assert '1' in col and col['1']['val'] == 1
- assert '2' in col and col['2']['val'] == 2
-
-
-def test_execute_with_params():
- txn = db.transaction(write=col_name)
- result = txn.execute(
- command='''
- function (params) {{
- var db = require('internal').db;
- db.{col}.save({{ '_key': '1', 'val': params.one }});
- db.{col}.save({{ '_key': '2', 'val': params.two }});
- return 'success with params!';
- }}'''.format(col=col_name),
- params={'one': 3, 'two': 4}
+ timeout=1000,
+ max_size=100000,
+ allow_implicit=True,
+ intermediate_commit_count=10,
+ intermediate_commit_size=10000
)
- assert result == 'success with params!'
- assert col['1']['val'] == 3
- assert col['2']['val'] == 4
-
-
-def test_execute_with_errors():
- txn = db.transaction(write=col_name)
- bad_col_name = generate_col_name()
- with pytest.raises(TransactionError):
- txn.execute(
- command='''
- function (params) {{
- var db = require('internal').db;
- db.{col}.save({{ '_key': '1', 'val': params.one }});
- db.{col}.save({{ '_key': '2', 'val': params.two }});
- return 'this transaction should fail!';
- }}'''.format(col=bad_col_name),
- params={'one': 3, 'two': 4}
- )
-
-
-def test_unsupported_methods():
- txn = db.transaction(write=col_name)
-
- with pytest.raises(TransactionError):
- txn.collection(col_name).statistics()
-
- with pytest.raises(TransactionError):
- txn.collection(col_name).properties()
-
- with pytest.raises(TransactionError):
- txn.collection(col_name).checksum()
-
-
-def test_transaction_error():
- with pytest.raises(TransactionError):
- with db.transaction(write=col_name) as txn:
- txn_col = txn.collection(col_name)
- txn_col.truncate()
- txn_col.insert(doc1)
- txn_col.insert(doc1)
-
-
-def test_commit_on_error():
- try:
- with db.transaction(write=col_name, commit_on_error=True) as txn:
- txn_col = txn.collection(col_name)
- txn_col.insert(doc1)
- txn_col.insert(doc2)
- raise ValueError
- except ValueError:
- pass
- assert len(col) == 2
- assert doc1['_key'] in col
- assert doc2['_key'] in col
- assert doc3['_key'] not in col
-
-
-def test_collection_methods():
- # Set up test documents
- col.import_bulk(test_docs)
-
- with db.transaction(write=col_name) as txn:
- txn.collection(col_name).truncate()
- assert len(col) == 0
-
-
-def test_insert_documents():
- # Test document insert in transaction
- with db.transaction(write=col_name) as txn:
- txn_col = txn.collection(col_name)
- txn_col.insert(doc1)
- txn_col.insert(doc2)
- txn_col.insert(doc3)
-
- assert len(col) == 3
- assert col['1']['data']['val'] == 100
- assert col['2']['data']['val'] == 200
- assert col['3']['data']['val'] == 300
-
- # Test document insert_many in transaction
- with db.transaction(write=col_name) as txn:
- txn_col = txn.collection(col_name)
- txn_col.truncate()
- txn_col.insert_many(test_docs, sync=True)
- assert len(col) == 5
- assert col['1']['data']['val'] == 100
- assert col['2']['data']['val'] == 200
- assert col['3']['data']['val'] == 300
- assert col['4']['data']['val'] == 400
- assert col['5']['data']['val'] == 500
-
- # Test document insert_many in transaction
- with pytest.raises(TransactionError):
- with db.transaction(write=col_name) as txn:
- txn_col = txn.collection(col_name)
- txn_col.truncate()
- txn_col.insert(doc1)
- # This should thrown an error
- txn_col.insert(doc1)
- # Transaction should be rolled back
- assert len(col) == 5
- assert col['1']['data']['val'] == 100
- assert col['2']['data']['val'] == 200
- assert col['3']['data']['val'] == 300
- assert col['4']['data']['val'] == 400
- assert col['5']['data']['val'] == 500
-
-
-def test_update_documents():
- # Test document update in transaction
- with db.transaction(write=col_name) as txn:
- txn_col = txn.collection(col_name)
- txn_col.insert_many(test_docs)
- d1, d2, d3, _, _ = get_test_docs()
- d1['data'] = None
- d2['data'] = {'foo': 600}
- d3['data'] = {'foo': 600}
- txn_col.update(d1, keep_none=False, sync=True)
- txn_col.update(d2, merge=False, sync=True)
- txn_col.update(d3, merge=True, sync=True)
- assert len(col) == 5
- assert 'data' not in col['1']
- assert col['2']['data'] == {'foo': 600}
- assert col['3']['data'] == {'val': 300, 'foo': 600}
-
- # Test document update_many in transaction
- with db.transaction(write=col_name) as txn:
- txn_col = txn.collection(col_name)
- txn_col.truncate()
- txn_col.insert_many(test_docs)
- d1, d2, d3, _, _ = get_test_docs()
- d1['data'] = None
- d2['data'] = {'foo': 600}
- d3['data'] = {'foo': 600}
- txn_col.update_many([d1, d2], keep_none=False, merge=False)
- txn_col.update_many([d3], keep_none=False, merge=True)
- assert len(col) == 5
- assert 'data' not in col['1']
- assert col['2']['data'] == {'foo': 600}
- assert col['3']['data'] == {'val': 300, 'foo': 600}
-
- # Test document update_match in transaction
- with db.transaction(write=col_name) as txn:
- txn_col = txn.collection(col_name)
- txn_col.truncate()
- txn_col.insert_many(test_docs)
- txn_col.update_match({'_key': '1'}, {'data': 700})
- txn_col.update_match({'_key': '5'}, {'data': 800})
- txn_col.update_match({'_key': '7'}, {'data': 900})
- assert len(col) == 5
- assert col['1']['data'] == 700
- assert col['5']['data'] == 800
-
-
-def test_update_documents_with_revisions():
- # Set up test document
- col.insert(doc1)
-
- # Test document update with revision check
- with db.transaction(write=col_name) as txn:
- txn_col = txn.collection(col_name)
- new_doc = doc1.copy()
- new_doc['data'] = {'val': 999}
- old_rev = col['1']['_rev']
- new_doc['_rev'] = old_rev + '000'
- txn_col.update(new_doc, check_rev=False)
- assert col['1']['_rev'] != old_rev
- assert col['1']['data'] == {'val': 999}
-
- # Test document update without revision check
- with pytest.raises(TransactionError):
- col.insert(doc2)
- with db.transaction(write=col_name) as txn:
- txn_col = txn.collection(col_name)
- new_doc = doc2.copy()
- new_doc['data'] = {'bar': 'baz'}
- old_rev = col['2']['_rev']
- new_doc['_rev'] = old_rev + '000'
- txn_col.update(new_doc, check_rev=True)
- assert col['2']['_rev'] == old_rev
- assert col['2']['data'] != {'bar': 'baz'}
-
-
-def test_replace_documents():
- # Test document replace in transaction
- with db.transaction(write=col_name) as txn:
- txn_col = txn.collection(col_name)
- txn_col.insert_many(test_docs)
- d1, d2, d3, _, _ = get_test_docs()
- d1['data'] = None
- d2['data'] = {'foo': 600}
- d3['data'] = {'bar': 600}
- txn_col.replace(d1, sync=True)
- txn_col.replace(d2, sync=True)
- txn_col.replace(d3, sync=True)
- assert len(col) == 5
- assert col['1']['data'] is None
- assert col['2']['data'] == {'foo': 600}
- assert col['3']['data'] == {'bar': 600}
-
- # Test document replace_many in transaction
- with db.transaction(write=col_name) as txn:
- txn_col = txn.collection(col_name)
- txn_col.truncate()
- txn_col.insert_many(test_docs)
- d1, d2, d3, _, _ = get_test_docs()
- d1['data'] = None
- d2['data'] = {'foo': 600}
- d3['data'] = {'bar': 600}
- txn_col.replace_many([d1, d2])
- txn_col.replace_many([d3])
- assert len(col) == 5
- assert col['1']['data'] is None
- assert col['2']['data'] == {'foo': 600}
- assert col['3']['data'] == {'bar': 600}
-
- # Test document replace_match in transaction
- with db.transaction(write=col_name) as txn:
- txn_col = txn.collection(col_name)
- txn_col.truncate()
- txn_col.insert_many(test_docs)
- txn_col.replace_match({'_key': '1'}, {'data': 700})
- txn_col.replace_match({'_key': '5'}, {'data': 800})
- txn_col.replace_match({'_key': '7'}, {'data': 900})
- assert len(col) == 5
- assert col['1']['data'] == 700
- assert col['5']['data'] == 800
-
-
-def test_replace_documents_with_revisions():
- # Set up test document
- col.insert(doc1)
-
- # TODO does not seem to work with 3.1
- # Test document replace without revision check
- # with db.transaction(write=col_name) as txn:
- # txn_col = txn.collection(col_name)
- # new_doc = doc1.copy()
- # new_doc['data'] = {'val': 999}
- # old_rev = col['1']['_rev']
- # new_doc['_rev'] = old_rev + '000'
- # txn_col.replace(new_doc, check_rev=False)
- # assert col['1']['_rev'] != old_rev
- # assert col['1']['data'] == {'val': 999}
-
- # Test document replace with revision check
- with pytest.raises(TransactionError):
- col.insert(doc2)
- with db.transaction(write=col_name) as txn:
- txn_col = txn.collection(col_name)
- new_doc = doc2.copy()
- new_doc['data'] = {'bar': 'baz'}
- old_rev = col['2']['_rev']
- new_doc['_rev'] = old_rev + '000'
- txn_col.replace(new_doc, check_rev=True)
- assert col['2']['_rev'] == old_rev
- assert col['2']['data'] != {'bar': 'baz'}
-
-
-def test_delete_documents():
- # Test document delete in transaction
- with db.transaction(write=col_name) as txn:
- txn_col = txn.collection(col_name)
- txn_col.insert_many(test_docs)
- d1, d2, d3, _, _ = get_test_docs()
- txn_col.delete(d1, sync=True)
- txn_col.delete(d2['_key'], sync=True)
- txn_col.delete(d3['_key'], sync=False)
- assert len(col) == 2
- assert '4' in col
- assert '5' in col
-
- # Test document delete_many in transaction
- with db.transaction(
- write=col_name,
- timeout=10000,
- commit_on_error=True
- ) as txn:
- txn_col = txn.collection(col_name)
- txn_col.truncate()
- txn_col.insert_many(test_docs)
- txn_col.delete_many([doc1, doc2, doc3], sync=True)
- txn_col.delete_many([doc3, doc4, doc5], sync=False)
- assert len(col) == 0
+ assert result is True
+ assert doc in col and col[key]['val'] == 1
- # Test document delete_match in transaction
- with db.transaction(
- write=col_name,
- timeout=10000,
- commit_on_error=True
- ) as txn:
- txn_col = txn.collection(col_name)
- txn_col.truncate()
- txn_col.insert_many(test_docs)
- new_docs = get_test_docs()
- for doc in new_docs:
- doc['val'] = 100
- txn_col.update_many(new_docs)
- txn_col.delete_match({'val': 100}, limit=2, sync=True)
- assert len(col) == 3
-
-
-def test_delete_documents_with_revision():
- # Set up test document
- col.insert(doc1)
-
- # TODO does not seem to work in 3.1
- # Test document delete without revision check
- # with db.transaction(write=col_name) as txn:
- # txn_col = txn.collection(col_name)
- # new_doc = doc1.copy()
- # new_doc['_rev'] = col['1']['_rev'] + '000'
- # txn_col.delete(new_doc, check_rev=False)
- # assert len(col) == 0
-
- # Test document delete with revision check
- col.insert(doc2)
- with pytest.raises(TransactionError):
- with db.transaction(write=col_name) as txn:
- txn_col = txn.collection(col_name)
- new_doc = doc2.copy()
- new_doc['_rev'] = col['2']['_rev'] + '000'
- txn_col.replace(new_doc, check_rev=True)
- assert len(col) == 2
-
-
-def test_bad_collections():
- with pytest.raises(TransactionError):
- with db.transaction(
- write=['missing'],
- timeout=10000
- ) as txn:
- txn_col = txn.collection(col_name)
- txn_col.insert(doc1)
-
- with pytest.raises(TransactionError):
- with db.transaction(
- read=[col_name],
- timeout=10000
- ) as txn:
- txn_col = txn.collection(col_name)
- txn_col.insert(doc2)
+ # Test execute invalid transaction
+ with pytest.raises(TransactionExecuteError) as err:
+ db.execute_transaction(command='INVALID COMMAND')
+ assert err.value.error_code == 10
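
For reference, the two transaction styles tested above, sketched together (the "students" collection is hypothetical): execute_transaction runs raw Javascript on the server in one shot, while begin_transaction queues API calls as jobs and submits them on commit.

    # Raw Javascript transaction, executed in one shot on the server.
    db.execute_transaction(
        command='''
        function (params) {
            var db = require('internal').db;
            db.students.save({_key: params.key});
            return true;
        }
        ''',
        params={'key': 'anna'},
        write=['students'],
    )

    # Batched transaction: each call returns a TransactionJob whose
    # result becomes available after commit().
    txn_db = db.begin_transaction(return_result=True)
    job = txn_db.collection('students').insert({'_key': 'andy'})
    txn_db.commit()
    print(job.result())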
diff --git a/tests/test_user.py b/tests/test_user.py
index 2f022573..6520c104 100644
--- a/tests/test_user.py
+++ b/tests/test_user.py
@@ -1,615 +1,192 @@
from __future__ import absolute_import, unicode_literals
from six import string_types
-import pytest
-from arango import ArangoClient
-from arango.utils import HTTP_AUTH_ERR
-from arango.exceptions import *
-
-from .utils import (
- generate_user_name,
+from arango.exceptions import (
+ DatabasePropertiesError,
+ UserCreateError,
+ UserDeleteError,
+ UserGetError,
+ UserListError,
+ UserReplaceError,
+ UserUpdateError,
+)
+from tests.helpers import (
+ assert_raises,
+ extract,
generate_db_name,
- generate_col_name
+ generate_username,
+ generate_string,
)
-arango_client = ArangoClient()
-bad_client = ArangoClient(password='incorrect')
-db_name = generate_db_name()
-db = arango_client.create_database(db_name)
-bad_db = bad_client.db(db_name)
-another_db_name = generate_db_name()
-
-
-def teardown_module(*_):
- # Clean up any users that were created during the test
- for user in arango_client.users():
- if user['username'].startswith('test_user'):
- arango_client.delete_user(user['username'])
- arango_client.delete_database(db_name, ignore_missing=True)
-
-
-def test_list_users():
- for user in arango_client.users():
- assert isinstance(user['username'], string_types)
- assert isinstance(user['active'], bool)
- assert isinstance(user['extra'], dict)
-
- with pytest.raises(UserListError) as err:
- bad_client.users()
- assert err.value.http_code in HTTP_AUTH_ERR
-
-
-def test_get_user():
- # Get existing user
- for user in arango_client.users():
- assert arango_client.user(user['username']) == user
- # Get a missing user
- bad_username = generate_user_name()
- with pytest.raises(UserGetError) as err:
- arango_client.user(bad_username)
- assert err.value.http_code == 404
+def test_user_management(sys_db, bad_db):
+ # Test create user
+ username = generate_username()
+ password = generate_string()
+ assert not sys_db.has_user(username)
-
-def test_create_user():
- # Create a new user
- username = generate_user_name()
- new_user = arango_client.create_user(
+ new_user = sys_db.create_user(
username=username,
- password='password',
+ password=password,
active=True,
extra={'foo': 'bar'},
)
assert new_user['username'] == username
assert new_user['active'] is True
assert new_user['extra'] == {'foo': 'bar'}
- assert arango_client.user(username) == new_user
-
- # Create a duplicate user
- with pytest.raises(UserCreateError) as err:
- arango_client.create_user(username=username, password='foo')
- assert 'duplicate' in err.value.message
+ assert sys_db.has_user(username)
-
-def test_update_user():
- username = generate_user_name()
- arango_client.create_user(
- username=username,
- password='password',
- active=True,
- extra={'foo': 'bar'},
- )
-
- # Update an existing user
- new_user = arango_client.update_user(
- username=username,
- password='new_password',
- active=False,
- extra={'bar': 'baz'},
- )
- assert new_user['username'] == username
- assert new_user['active'] is False
- assert new_user['extra'] == {'foo': 'bar', 'bar': 'baz'}
- assert arango_client.user(username) == new_user
-
- # Update a missing user
- bad_username = generate_user_name()
- with pytest.raises(UserUpdateError) as err:
- arango_client.update_user(
- username=bad_username,
- password='new_password'
- )
- assert err.value.http_code == 404
-
-
-def test_replace_user():
- username = generate_user_name()
- arango_client.create_user(
- username=username,
- password='password',
- active=True,
- extra={'foo': 'bar'},
- )
-
- # Replace an existing user
- new_user = arango_client.replace_user(
- username=username,
- password='password',
- active=False,
- extra={'bar': 'baz'},
- )
- assert new_user['username'] == username
- assert new_user['active'] is False
- assert new_user['extra'] == {'bar': 'baz'}
- assert arango_client.user(username) == new_user
-
- # Replace a missing user
- bad_username = generate_user_name()
- with pytest.raises(UserReplaceError) as err:
- arango_client.replace_user(
- username=bad_username,
- password='new_password'
+ # Test create duplicate user
+ with assert_raises(UserCreateError) as err:
+ sys_db.create_user(
+ username=username,
+ password=password
)
- assert err.value.http_code == 404
-
-
-def test_delete_user():
- username = generate_user_name()
- arango_client.create_user(
- username=username,
- password='password'
- )
-
- # Delete an existing user
- assert arango_client.delete_user(username) is True
-
- # Delete a missing user without ignore_missing
- with pytest.raises(UserDeleteError) as err:
- arango_client.delete_user(username, ignore_missing=False)
- assert err.value.http_code == 404
-
- # Delete a missing user with ignore_missing
- assert arango_client.delete_user(username, ignore_missing=True) is False
-
-
-def test_grant_user_access():
- # Create a test user and login as that user
- username = generate_user_name()
- arango_client.create_user(username=username, password='password')
- user_db = arango_client.database(
- name=db_name,
- username=username,
- password='password'
- )
-
- # Create a collection with the user (should have no access)
- col_name = generate_col_name()
- with pytest.raises(CollectionCreateError) as err:
- user_db.create_collection(col_name)
- assert err.value.http_code in HTTP_AUTH_ERR
- assert col_name not in set(col['name'] for col in db.collections())
-
- # Grant the user access and try again
- arango_client.grant_user_access(username, db_name)
- db.create_collection(col_name)
- assert col_name in set(col['name'] for col in db.collections())
-
- # Grant access to a missing user
- bad_username = generate_user_name()
- with pytest.raises(UserGrantAccessError) as err:
- arango_client.grant_user_access(bad_username, db_name)
- assert err.value.http_code == 404
-
-
-def test_revoke_user_access():
- # Create a test user with access and login as that user
- username = generate_user_name()
- arango_client.create_user(username=username, password='password')
- arango_client.grant_user_access(username, db_name)
- user_db = arango_client.database(
- name=db_name,
- username=username,
- password='password'
- )
-
- # Test user access by creating a collection
- col_name = generate_col_name()
- user_db.create_collection(col_name)
- assert col_name in set(col['name'] for col in db.collections())
-
- # Revoke access from the user
- arango_client.revoke_user_access(username, db_name)
- with pytest.raises(CollectionDeleteError) as err:
- user_db.delete_collection(col_name)
- assert err.value.http_code in HTTP_AUTH_ERR
-
- # Test revoke access to missing user
- bad_username = generate_user_name()
- with pytest.raises(UserRevokeAccessError) as err:
- arango_client.revoke_user_access(bad_username, db_name)
- assert err.value.http_code == 404
-
-
-def test_get_user_access():
- # Create a test user
- username = generate_user_name()
- arango_client.create_user(username=username, password='password')
-
- # Get user access (should be empty initially)
- assert arango_client.user_access(username) == []
-
- # Grant user access to the database and check again
- arango_client.grant_user_access(username, db_name)
- assert arango_client.user_access(username) == [db_name]
-
- # Get access of a missing user
- bad_username = generate_user_name()
- assert arango_client.user_access(bad_username) == []
-
- # Get access of a user from a bad client (incorrect password)
- with pytest.raises(UserAccessError) as err:
- bad_client.user_access(username)
- assert err.value.http_code in HTTP_AUTH_ERR
-
-
-def test_change_password():
- username = generate_user_name()
- arango_client.create_user(username=username, password='password1')
- arango_client.grant_user_access(username, db_name)
-
- db1 = arango_client.db(db_name, username, 'password1')
- db2 = arango_client.db(db_name, username, 'password2')
+ assert err.value.error_code == 1702
- # Ensure that the user can make requests with correct credentials
- db1.properties()
-
- # Ensure that the user cannot make requests with bad credentials
- with pytest.raises(DatabasePropertiesError) as err:
- db2.properties()
- assert err.value.http_code in HTTP_AUTH_ERR
-
- # Update the user password and test again
- arango_client.update_user(username=username, password='password2')
- db2.properties()
-
- # TODO ArangoDB 3.2 seems to have broken authentication:
- # TODO When the password of a user is changed, the old password still works
- # db1.create_collection('test1')
- # with pytest.raises(DatabasePropertiesError) as err:
- # db1.create_collection('test')
- # assert err.value.http_code in HTTP_AUTH_ERR
- #
- # # Replace the user password and test again
- # arango_client.update_user(username=username, password='password1')
- # db1.properties()
- # with pytest.raises(DatabasePropertiesError) as err:
- # db2.properties()
- # assert err.value.http_code in HTTP_AUTH_ERR
-
-
-def test_create_user_with_database():
- username1 = generate_user_name()
- username2 = generate_user_name()
- username3 = generate_user_name()
- user_db = arango_client.create_database(
- name=another_db_name,
- users=[
- {'username': username1, 'password': 'password1'},
- {'username': username2, 'password': 'password2'},
- {'username': username3, 'password': 'password3', 'active': False},
- ],
- username=username1,
- password='password1'
- )
- # Test if the users were created properly
- all_usernames = set(user['username'] for user in arango_client.users())
- assert username1 in all_usernames
- assert username2 in all_usernames
-
- # Test if the first user has access to the database
- assert user_db.connection.username == username1
- assert user_db.connection.password == 'password1'
- user_db.properties()
-
- # Test if the second user also has access to the database
- user_db = arango_client.database(another_db_name, username2, 'password2')
- assert user_db.connection.username == username2
- assert user_db.connection.password == 'password2'
- user_db.properties()
-
- # Test if the third user has access to the database (should not)
- user_db = arango_client.database(another_db_name, username3, 'password3')
- assert user_db.connection.username == username3
- assert user_db.connection.password == 'password3'
- with pytest.raises(DatabasePropertiesError) as err:
- user_db.properties()
- assert err.value.http_code in HTTP_AUTH_ERR
-
-def test_list_users_db_level():
- for user in db.users():
+ # Test list users
+ for user in sys_db.users():
assert isinstance(user['username'], string_types)
assert isinstance(user['active'], bool)
assert isinstance(user['extra'], dict)
+ assert sys_db.user(username) == new_user
- with pytest.raises(UserListError) as err:
+ # Test list users with bad database
+ with assert_raises(UserListError) as err:
bad_db.users()
- assert err.value.http_code in HTTP_AUTH_ERR
-
+ assert err.value.error_code == 1228
-def test_get_user_db_level():
- # Get existing user
- for user in db.users():
- assert db.user(user['username']) == user
+ # Test get user
+ users = sys_db.users()
+ for user in users:
+ assert 'active' in user
+ assert 'extra' in user
+ assert 'username' in user
+ assert username in extract('username', sys_db.users())
- # Get a missing user
- bad_username = generate_user_name()
- with pytest.raises(UserGetError) as err:
- db.user(bad_username)
- assert err.value.http_code == 404
+ # Test get missing user
+ with assert_raises(UserGetError) as err:
+ sys_db.user(generate_username())
+ assert err.value.error_code == 1703
-
-def test_create_user_db_level():
- # Create a new user
- username = generate_user_name()
- new_user = db.create_user(
+ # Update existing user
+ new_user = sys_db.update_user(
username=username,
- password='password',
- active=True,
- extra={'foo': 'bar'},
- )
- assert new_user['username'] == username
- assert new_user['active'] is True
- assert new_user['extra'] == {'foo': 'bar'}
- assert db.user(username) == new_user
-
- # Create a duplicate user
- with pytest.raises(UserCreateError) as err:
- db.create_user(username=username, password='foo')
- assert 'duplicate' in err.value.message
-
-
-def test_update_user_db_level():
- username = generate_user_name()
- db.create_user(
- username=username,
- password='password',
- active=True,
- extra={'foo': 'bar'},
- )
-
- # Update an existing user
- new_user = db.update_user(
- username=username,
- password='new_password',
+ password=password,
active=False,
extra={'bar': 'baz'},
)
assert new_user['username'] == username
assert new_user['active'] is False
- assert new_user['extra'] == {'foo': 'bar', 'bar': 'baz'}
- assert db.user(username) == new_user
-
- # Update a missing user
- bad_username = generate_user_name()
- with pytest.raises(UserUpdateError) as err:
- db.update_user(
- username=bad_username,
- password='new_password'
- )
- assert err.value.http_code == 404
-
+ assert new_user['extra'] == {'bar': 'baz'}
+ assert sys_db.user(username) == new_user
-def test_replace_user_db_level():
- username = generate_user_name()
- db.create_user(
- username=username,
- password='password',
- active=True,
- extra={'foo': 'bar'},
- )
+ # Update missing user
+ with assert_raises(UserUpdateError) as err:
+ sys_db.update_user(
+ username=generate_username(),
+ password=generate_string()
+ )
+ assert err.value.error_code == 1703
- # Replace an existing user
- new_user = db.replace_user(
+ # Replace existing user
+ new_user = sys_db.replace_user(
username=username,
- password='password',
+ password=password,
active=False,
- extra={'bar': 'baz'},
+ extra={'baz': 'qux'},
)
assert new_user['username'] == username
assert new_user['active'] is False
- assert new_user['extra'] == {'bar': 'baz'}
- assert db.user(username) == new_user
-
- # Replace a missing user
- bad_username = generate_user_name()
- with pytest.raises(UserReplaceError) as err:
- db.replace_user(
- username=bad_username,
- password='new_password'
+ assert new_user['extra'] == {'baz': 'qux'}
+ assert sys_db.user(username) == new_user
+
+ # Replace missing user
+ with assert_raises(UserReplaceError) as err:
+ sys_db.replace_user(
+ username=generate_username(),
+ password=generate_string()
)
- assert err.value.http_code == 404
-
-
-def test_delete_user_db_level():
- username = generate_user_name()
- db.create_user(
- username=username,
- password='password'
- )
+ assert err.value.error_code == 1703
# Delete an existing user
- assert db.delete_user(username) is True
+ assert sys_db.delete_user(username) is True
- # Delete a missing user without ignore_missing
- with pytest.raises(UserDeleteError) as err:
- db.delete_user(username, ignore_missing=False)
- assert err.value.http_code == 404
+ # Delete a missing user
+ with assert_raises(UserDeleteError) as err:
+ sys_db.delete_user(username, ignore_missing=False)
+ assert err.value.error_code == 1703
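+ # With ignore_missing=True, deleting a missing user returns False instead of raising.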
+ assert sys_db.delete_user(username, ignore_missing=True) is False
- # Delete a missing user with ignore_missing
- assert db.delete_user(username, ignore_missing=True) is False
+def test_user_change_password(client, sys_db):
+ username = generate_username()
+ password1 = generate_string()
+ password2 = generate_string()
-def test_grant_user_access_db_level():
- # Create a test user and login as that user
- username = generate_user_name()
- db.create_user(username=username, password='password')
- user_db = arango_client.database(
- name=db_name,
- username=username,
- password='password'
- )
-
- # Create a collection with the user (should have no access)
- col_name = generate_col_name()
- with pytest.raises(CollectionCreateError) as err:
- user_db.create_collection(col_name)
- assert err.value.http_code in HTTP_AUTH_ERR
- assert col_name not in set(col['name'] for col in db.collections())
-
- # Grant the user access and try again
- db.grant_user_access(username)
- db.create_collection(col_name)
- assert col_name in set(col['name'] for col in db.collections())
-
- # Grant access to a missing user
- bad_username = generate_user_name()
- with pytest.raises(UserGrantAccessError) as err:
- db.grant_user_access(bad_username)
- assert err.value.http_code == 404
-
-
-def test_revoke_user_access_db_level():
- # Create a test user with access and login as that user
- username = generate_user_name()
- db.create_user(username=username, password='password')
- db.grant_user_access(username, db_name)
- user_db = arango_client.database(
- name=db_name,
- username=username,
- password='password'
- )
-
- # Test user access by creating a collection
- col_name = generate_col_name()
- user_db.create_collection(col_name)
- assert col_name in set(col['name'] for col in db.collections())
+ sys_db.create_user(username, password1)
+ sys_db.update_permission(username, 'rw', sys_db.name)
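+ # Give the user read/write access to the database so that
+ # authenticated requests can succeed.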
- # Revoke access from the user
- db.revoke_user_access(username)
- with pytest.raises(CollectionDeleteError) as err:
- user_db.delete_collection(col_name)
- assert err.value.http_code in HTTP_AUTH_ERR
-
- # Test revoke access to missing user
- bad_username = generate_user_name()
- with pytest.raises(UserRevokeAccessError) as err:
- db.revoke_user_access(bad_username)
- assert err.value.http_code == 404
+ db1 = client.db(sys_db.name, username, password1)
+ db2 = client.db(sys_db.name, username, password2)
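+ # client.db() does not verify credentials by default, so the bad
+ # password in db2 only surfaces on the first request.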
+ # Check that only the current password authenticates
+ assert isinstance(db1.properties(), dict)
+ with assert_raises(DatabasePropertiesError) as err:
+ db2.properties()
+ assert err.value.http_code == 401
+
+ # Update the user password and check again
+ sys_db.update_user(username, password2)
+ assert isinstance(db2.properties(), dict)
+ with assert_raises(DatabasePropertiesError) as err:
+ db1.properties()
+ assert err.value.http_code == 401
+
+ # Update the password back to the original and check again
+ sys_db.update_user(username, password1)
+ assert isinstance(db1.properties(), dict)
+ with assert_raises(DatabasePropertiesError) as err:
+ db2.properties()
+ assert err.value.http_code == 401
-def test_get_user_access_db_level():
- # Create a test user
- username = generate_user_name()
- db.create_user(username=username, password='password')
- # Get user access (should be none initially)
- assert db.user_access(username) is None
+def test_user_create_with_new_database(client, sys_db):
+ db_name = generate_db_name()
- # Grant user access to the database and check again
- db.grant_user_access(username)
- assert db.user_access(username) == 'rw'
+ username1 = generate_username()
+ username2 = generate_username()
+ username3 = generate_username()
- # Get access of a missing user
- bad_username = generate_user_name()
- assert db.user_access(bad_username) is None
+ password1 = generate_string()
+ password2 = generate_string()
+ password3 = generate_string()
- # Get user access from a bad database (incorrect password)
- with pytest.raises(UserAccessError) as err:
- bad_db.user_access(bad_username)
- assert err.value.http_code in HTTP_AUTH_ERR
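+ # A new database can be provisioned together with its initial users in a single call.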
+ result = sys_db.create_database(
+ name=db_name,
+ users=[
+ {'username': username1, 'password': password1, 'active': True},
+ {'username': username2, 'password': password2, 'active': True},
+ {'username': username3, 'password': password3, 'active': False},
+ ]
+ )
+ assert result is True
+ # Test that all three users were created
+ usernames = extract('username', sys_db.users())
+ assert all(u in usernames for u in [username1, username2, username3])
-def test_grant_user_access_collection_level():
- # Create a new test user
- username = generate_user_name()
- db.create_user(username=username, password='password')
+ # Test if the first user has access to the database
+ db = client.db(db_name, username1, password1)
+ db.properties()
- # Sign in as the user
- user_db = arango_client.database(
- name=db_name,
- username=username,
- password='password'
- )
+ # Test if the second user also has access to the database
+ db = client.db(db_name, username2, password2)
+ db.properties()
- # Create a new collection
- col_name = generate_col_name()
- col = db.create_collection(col_name)
-
- # The new user should have no access to the collection
- with pytest.raises(ArangoError) as err:
- user_db.collection(col_name).count()
- assert err.value.http_code in HTTP_AUTH_ERR
- assert isinstance(err.value, DocumentCountError) \
- or isinstance(err.value, AsyncExecuteError) \
- or isinstance(err.value, BatchExecuteError)
-
- # After granting access to the collection it should work
- col.grant_user_access(username)
- assert user_db.collection(col_name).count() == 0
-
- # Grant access from a bad database (missing user)
- with pytest.raises(ArangoError) as err:
- bad_db.collection(col_name).grant_user_access(generate_user_name())
- assert err.value.http_code in HTTP_AUTH_ERR
- assert isinstance(err.value, UserGrantAccessError) \
- or isinstance(err.value, AsyncExecuteError) \
- or isinstance(err.value, BatchExecuteError)
-
-
-def test_revoke_user_access_collection_level():
- # Create a new test user
- username = generate_user_name()
- db.create_user(username=username, password='password')
- user_db = arango_client.database(
- name=db_name,
- username=username,
- password='password'
- )
- col_name = generate_col_name()
- col = db.create_collection(col_name)
-
- # The new user should have no access to the collection
- with pytest.raises(ArangoError) as err:
- user_db.collection(col_name).count()
- assert err.value.http_code in HTTP_AUTH_ERR
-
- # After granting access to the collection it should work
- col.grant_user_access(username)
- assert user_db.collection(col_name).count() == 0
-
- # Revoke the access again to see that 401 is back
- col.revoke_user_access(username)
- with pytest.raises(ArangoError) as err:
- user_db.collection(col_name).count()
- assert err.value.http_code in HTTP_AUTH_ERR
-
- # Grant access from a bad database (missing user)
- with pytest.raises(ArangoError) as err:
- bad_db.collection(col_name).revoke_user_access(generate_user_name())
- assert err.value.http_code in HTTP_AUTH_ERR
- assert isinstance(err.value, UserRevokeAccessError) \
- or isinstance(err.value, AsyncExecuteError) \
- or isinstance(err.value, BatchExecuteError)
-
-
-def test_get_user_access_collection_level():
- # Create a new test user
- username = generate_user_name()
- db.create_user(username=username, password='password')
- user_db = arango_client.database(
- name=db_name,
- username=username,
- password='password'
- )
- col_name = generate_col_name()
- col = db.create_collection(col_name)
-
- # The new user should have no access to the collection
- assert col.user_access(username) is None
-
- # After granting access to the collection it should work
- col.grant_user_access(username)
- assert col.user_access(username) == 'rw'
-
- # Revoke the access again to see that 401 is back
- col.revoke_user_access(username)
- assert col.user_access(username) is None
-
- # Grant access from a bad database (missing user)
- with pytest.raises(ArangoError) as err:
- bad_db.collection(col_name).user_access(generate_user_name())
- assert err.value.http_code in HTTP_AUTH_ERR
- assert isinstance(err.value, UserAccessError) \
- or isinstance(err.value, AsyncExecuteError) \
- or isinstance(err.value, BatchExecuteError)
\ No newline at end of file
+ # Test that the third user, created with active=False, has no access to the database
+ db = client.db(db_name, username3, password3)
+ with assert_raises(DatabasePropertiesError) as err:
+ db.properties()
+ assert err.value.http_code == 401
diff --git a/tests/test_version.py b/tests/test_version.py
deleted file mode 100644
index 16ab289a..00000000
--- a/tests/test_version.py
+++ /dev/null
@@ -1,6 +0,0 @@
-from arango.version import VERSION
-
-
-def test_package_version():
- assert VERSION.count('.', 2)
- assert all([number.isdigit() for number in VERSION.split('.')])
diff --git a/tests/test_wal.py b/tests/test_wal.py
index 50586b06..087ae9da 100644
--- a/tests/test_wal.py
+++ b/tests/test_wal.py
@@ -1,41 +1,31 @@
from __future__ import absolute_import, unicode_literals
-import pytest
-
-from arango import ArangoClient
from arango.exceptions import (
WALConfigureError,
WALFlushError,
WALPropertiesError,
WALTransactionListError
)
-
-from .utils import generate_user_name, generate_db_name
-
-arango_client = ArangoClient()
-username = generate_user_name()
-user = arango_client.create_user(username, 'password')
-db_name = generate_db_name()
-db = arango_client.create_database(db_name)
+from tests.helpers import assert_raises
-def teardown_module(*_):
- arango_client.delete_user(username, ignore_missing=True)
-
-
-@pytest.mark.order1
-def test_wal_properties():
- properties = arango_client.wal.properties()
- assert 'ArangoDB write-ahead log' in repr(arango_client.wal)
+def test_wal_misc_methods(sys_db, bad_db):
+ # Test get properties
+ properties = sys_db.wal.properties()
assert 'oversized_ops' in properties
assert 'log_size' in properties
assert 'historic_logs' in properties
assert 'reserve_logs' in properties
+ assert 'throttle_wait' in properties
+ assert 'throttle_limit' in properties
+ # Test get properties with bad database
+ with assert_raises(WALPropertiesError) as err:
+ bad_db.wal.properties()
+ assert err.value.error_code == 1228
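+ # 1228 is ArangoDB's "database not found" error code (bad_db points at a nonexistent database).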
-@pytest.mark.order2
-def test_wal_configure():
- arango_client.wal.configure(
+ # Test configure properties
+ sys_db.wal.configure(
historic_logs=15,
oversized_ops=False,
log_size=30000000,
@@ -43,7 +33,7 @@ def test_wal_configure():
throttle_limit=0,
throttle_wait=16000
)
- properties = arango_client.wal.properties()
+ properties = sys_db.wal.properties()
assert properties['historic_logs'] == 15
assert properties['oversized_ops'] is False
assert properties['log_size'] == 30000000
@@ -51,100 +41,26 @@ def test_wal_configure():
assert properties['throttle_limit'] == 0
assert properties['throttle_wait'] == 16000
+ # Test configure properties with bad database
+ with assert_raises(WALConfigureError) as err:
+ bad_db.wal.configure(log_size=2000000)
+ assert err.value.error_code == 1228
-@pytest.mark.order3
-def test_wal_list_transactions():
- result = arango_client.wal.transactions()
+ # Test get transactions
+ result = sys_db.wal.transactions()
assert 'count' in result
- assert 'last_sealed' in result
assert 'last_collected' in result
+ # Test get transactions with bad database
+ with assert_raises(WALTransactionListError) as err:
+ bad_db.wal.transactions()
+ assert err.value.error_code == 1228
-@pytest.mark.order4
-def test_flush_wal():
- result = arango_client.wal.flush(garbage_collect=False, sync=False)
+ # Test flush
+ result = sys_db.wal.flush(garbage_collect=False, sync=False)
assert isinstance(result, bool)
-
-@pytest.mark.order5
-def test_wal_errors():
- client_with_bad_user = ArangoClient(
- username=username,
- password='incorrect',
- verify=False
- )
- bad_wal = client_with_bad_user.wal
- with pytest.raises(WALPropertiesError):
- bad_wal.properties()
-
- with pytest.raises(WALConfigureError):
- bad_wal.configure(log_size=2000000)
-
- with pytest.raises(WALTransactionListError):
- bad_wal.transactions()
-
- with pytest.raises(WALFlushError):
- bad_wal.flush(garbage_collect=False, sync=False)
-
-
-@pytest.mark.order6
-def test_wal_properties_db_level():
- properties = db.wal.properties()
- assert 'ArangoDB write-ahead log' in repr(arango_client.wal)
- assert 'oversized_ops' in properties
- assert 'log_size' in properties
- assert 'historic_logs' in properties
- assert 'reserve_logs' in properties
-
-
-@pytest.mark.order7
-def test_wal_configure_db_level():
- db.wal.configure(
- historic_logs=15,
- oversized_ops=False,
- log_size=30000000,
- reserve_logs=5,
- throttle_limit=0,
- throttle_wait=16000
- )
- properties = db.wal.properties()
- assert properties['historic_logs'] == 15
- assert properties['oversized_ops'] is False
- assert properties['log_size'] == 30000000
- assert properties['reserve_logs'] == 5
- assert properties['throttle_limit'] == 0
- assert properties['throttle_wait'] == 16000
-
-
-@pytest.mark.order8
-def test_wal_list_transactions_db_level():
- result = db.wal.transactions()
- assert 'count' in result
- assert 'last_sealed' in result
- assert 'last_collected' in result
-
-
-@pytest.mark.order9
-def test_flush_wal_db_level():
- result = db.wal.flush(garbage_collect=False, sync=False)
- assert isinstance(result, bool)
-
-
-@pytest.mark.order10
-def test_wal_errors_db_level():
- bad_wal = ArangoClient(
- username=username,
- password='incorrect',
- verify=False
- ).db(db_name).wal
- with pytest.raises(WALPropertiesError):
- bad_wal.properties()
-
- with pytest.raises(WALConfigureError):
- bad_wal.configure(log_size=2000000)
-
- with pytest.raises(WALTransactionListError):
- bad_wal.transactions()
-
- with pytest.raises(WALFlushError):
- bad_wal.flush(garbage_collect=False, sync=False)
\ No newline at end of file
+ # Test flush with bad database
+ with assert_raises(WALFlushError) as err:
+ bad_db.wal.flush(garbage_collect=False, sync=False)
+ assert err.value.error_code == 1228
diff --git a/tests/utils.py b/tests/utils.py
deleted file mode 100644
index e9c1af4e..00000000
--- a/tests/utils.py
+++ /dev/null
@@ -1,101 +0,0 @@
-from __future__ import absolute_import, unicode_literals
-
-from random import randint
-from uuid import uuid4
-
-
-def arango_version(client):
- """Return the major and minor version of ArangoDB.
-
- :param client: The ArangoDB client.
- :type client: arango.ArangoClient
- :return: The major and minor version numbers.
- :rtype: (int, int)
- """
- version_nums = client.version().split('.')
- return map(int, version_nums[:2])
-
-
-def generate_db_name():
- """Generate and return a random database name.
-
- :returns: A random database name.
- :rtype: str | unicode
- """
- return 'test_database_{}'.format(uuid4().hex)
-
-
-def generate_col_name():
- """Generate and return a random collection name.
-
- :returns: A random collection name.
- :rtype: str | unicode
- """
- return 'test_collection_{}'.format(uuid4().hex)
-
-
-def generate_graph_name():
- """Generate and return a random graph name.
-
- :returns: A random graph name.
- :rtype: str | unicode
- """
- return 'test_graph_{}'.format(uuid4().hex)
-
-
-def generate_task_name():
- """Generate and return a random task name.
-
- :returns: A random task name.
- :rtype: str | unicode
- """
- return 'test_task_{}'.format(uuid4().hex)
-
-
-def generate_task_id():
- """Generate and return a random task ID.
-
- :returns: A random task ID
- :rtype: str | unicode
- """
- return 'test_task_id_{}'.format(uuid4().hex)
-
-
-def generate_user_name():
- """Generate and return a random username.
-
- :returns: A random username.
- :rtype: str | unicode
- """
- return 'test_user_{}'.format(uuid4().hex)
-
-
-def clean_keys(obj):
- """Return the document(s) with all the system keys stripped.
-
- :param obj: document(s)
- :type obj: list |dict | object
- :returns: the document(s) with the system keys stripped
- :rtype: list | dict |object
- """
- if isinstance(obj, dict):
- return {
- k: v for k, v in obj.items()
- if not (k not in {'_key', '_from', '_to'} and k.startswith('_'))
- }
- else:
- return [{
- k: v for k, v in document.items()
- if not (k not in {'_key', '_from', '_to'} and k.startswith('_'))
- } for document in obj]
-
-
-def ordered(documents):
- """Sort the list of the documents by keys and return the list.
-
- :param documents: the list of documents to order
- :type documents: [dict]
- :returns: the ordered list of documents
- :rtype: [dict]
- """
- return sorted(documents, key=lambda doc: doc['_key'])